Fabrice-TIERCELIN committed on
Commit
7caba14
·
verified ·
1 Parent(s): d7514c8

Change model

Browse files
Files changed (1) hide show
  1. app.py +488 -488
app.py CHANGED
@@ -1,488 +1,488 @@
1
from diffusers_helper.hf_login import login

import os

# Keep the Hugging Face cache next to the app so downloaded weights persist
# with the Space instead of the default per-user cache directory.
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))

import gradio as gr
import torch
import traceback
import einops
import safetensors.torch as sf
import numpy as np
import math
import spaces

from PIL import Image
from diffusers import AutoencoderKLHunyuanVideo
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
from diffusers_helper.thread_utils import AsyncStream, async_run
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
from transformers import SiglipImageProcessor, SiglipVisionModel
from diffusers_helper.clip_vision import hf_clip_vision_encode
from diffusers_helper.bucket_tools import find_nearest_bucket


# With > 80 GB free VRAM every model stays resident on the GPU; otherwise
# models are swapped on/off the device per pipeline stage (see `worker`).
free_mem_gb = get_cuda_free_memory_gb(gpu)
high_vram = free_mem_gb > 80

print(f'Free VRAM {free_mem_gb} GB')
print(f'High-VRAM Mode: {high_vram}')

# Text encoders, tokenizers and VAE from the HunyuanVideo community repo.
# Everything is materialised on CPU first; device placement happens below.
text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()

# SigLIP vision tower used for the CLIP-Vision image conditioning.
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()

# quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()

# Inference only: eval mode everywhere, gradients disabled below.
vae.eval()
text_encoder.eval()
text_encoder_2.eval()
image_encoder.eval()
transformer.eval()

if not high_vram:
    # Trade VAE speed for lower peak memory on low-VRAM machines.
    vae.enable_slicing()
    vae.enable_tiling()

transformer.high_quality_fp32_output_for_inference = True
print('transformer.high_quality_fp32_output_for_inference = True')

transformer.to(dtype=torch.bfloat16)
vae.to(dtype=torch.float16)
image_encoder.to(dtype=torch.float16)
text_encoder.to(dtype=torch.float16)
text_encoder_2.to(dtype=torch.float16)

vae.requires_grad_(False)
text_encoder.requires_grad_(False)
text_encoder_2.requires_grad_(False)
image_encoder.requires_grad_(False)
transformer.requires_grad_(False)

if not high_vram:
    # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
    DynamicSwapInstaller.install_model(transformer, device=gpu)
    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
else:
    text_encoder.to(gpu)
    text_encoder_2.to(gpu)
    image_encoder.to(gpu)
    vae.to(gpu)
    transformer.to(gpu)

# Cross-thread queue pair linking the UI generators and the worker thread.
stream = AsyncStream()

outputs_folder = './outputs/'
os.makedirs(outputs_folder, exist_ok=True)

# (image, prompt) pairs for the gr.Examples widget (currently commented out
# at the bottom of the file).
examples = [
    ["img_examples/1.png", "The girl dances gracefully, with clear movements, full of charm.",],
    ["img_examples/2.jpg", "The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair."],
    ["img_examples/3.png", "The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements."],
]
98
def generate_examples(input_image, prompt):
    """Run one generation with every option pinned to its UI default.

    Mirrors `process` (minus the ImageEditor alpha-flattening) so that
    cached example runs behave like a plain button click.  Yields the
    same 6-tuple the UI expects: (video, preview, desc, html,
    start_button_update, end_button_update).
    """
    global stream

    # Fixed defaults matching the sliders/checkboxes in the UI.
    t2v = False
    n_prompt = ""
    seed = 31337
    total_second_length = 5
    latent_window_size = 9
    steps = 25
    cfg = 1.0
    gs = 10.0
    rs = 0.0
    gpu_memory_preservation = 6
    use_teacache = True
    mp4_crf = 16

    # assert input_image is not None, 'No input image!'
    if t2v:
        # Text-to-video: substitute a blank white canvas for the image.
        default_height, default_width = 640, 640
        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
        print("No input image provided. Using a blank white image.")

    # First update: clear outputs, disable Start, enable End.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    stream = AsyncStream()
    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)

    output_filename = None

    # Relay worker events to the UI until the worker signals 'end'.
    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'progress':
            preview, desc, html = data
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'end':
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
146
@torch.no_grad()
def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
    """Background generation job: encode inputs, sample latent sections,
    progressively decode and save ever-longer MP4 files.

    Runs on a worker thread (launched via async_run) and communicates only
    through the global `stream` queues:
      - pushes ('progress', (preview, desc, html)) for UI updates,
      - pushes ('file', path) each time a longer video has been written,
      - pushes ('end', None) exactly once when finished or cancelled.
    Cancellation is requested by pushing 'end' onto stream.input_queue.
    """
    # 30 fps output; each latent section decodes to latent_window_size*4-3 frames.
    total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
    total_latent_sections = int(max(round(total_latent_sections), 1))

    job_id = generate_timestamp()

    stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))

    try:
        # Clean GPU
        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

        # Text encoding

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))

        if not high_vram:
            fake_diffusers_current_device(text_encoder, gpu)  # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
            load_model_as_complete(text_encoder_2, target_device=gpu)

        llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        # With cfg == 1 the negative branch cancels out of the guidance math,
        # so zero embeddings stand in for encoding n_prompt.
        if cfg == 1:
            llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
        else:
            llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
        llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)

        # Processing input image

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))

        # Snap to the nearest supported resolution bucket, then crop/resize.
        H, W, C = input_image.shape
        height, width = find_nearest_bucket(H, W, resolution=640)
        input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)

        # Keep the conditioning frame on disk next to the job's videos.
        Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))

        # uint8 [0, 255] -> float [-1, 1], laid out as (1, C, 1, H, W).
        input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
        input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]

        # VAE encoding

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))

        if not high_vram:
            load_model_as_complete(vae, target_device=gpu)

        start_latent = vae_encode(input_image_pt, vae)

        # CLIP Vision

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))

        if not high_vram:
            load_model_as_complete(image_encoder, target_device=gpu)

        image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
        image_encoder_last_hidden_state = image_encoder_output.last_hidden_state

        # Dtype: all conditioning tensors must match the transformer (bf16).

        llama_vec = llama_vec.to(transformer.dtype)
        llama_vec_n = llama_vec_n.to(transformer.dtype)
        clip_l_pooler = clip_l_pooler.to(transformer.dtype)
        clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
        image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)

        # Sampling

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))

        # CPU generator keeps results reproducible for a given seed.
        rnd = torch.Generator("cpu").manual_seed(seed)

        # 16+2+1 zero "history" frames seed the 4x/2x/1x clean-latent context
        # for the first section; the start image latent is appended after.
        history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
        history_pixels = None

        history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
        total_generated_latent_frames = 1

        for section_index in range(total_latent_sections):
            # Honour a pending cancellation before starting a new section.
            if stream.input_queue.top() == 'end':
                stream.output_queue.push(('end', None))
                return

            print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')

            if not high_vram:
                unload_complete_models()
                move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)

            if use_teacache:
                transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
            else:
                transformer.initialize_teacache(enable_teacache=False)

            def callback(d):
                # Per-step sampler callback: build a tiled preview image and
                # report progress; raising here aborts the sampling loop.
                preview = d['denoised']
                preview = vae_decode_fake(preview)

                preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
                preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')

                if stream.input_queue.top() == 'end':
                    stream.output_queue.push(('end', None))
                    # Caught by the bare `except:` at the bottom of worker().
                    raise KeyboardInterrupt('User ends the task.')

                current_step = d['i'] + 1
                percentage = int(100.0 * current_step / steps)
                hint = f'Sampling {current_step}/{steps}'
                desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
                stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
                return

            # Index layout: [start frame | 16x 4x-ctx | 2x ctx | 1x ctx | new window].
            indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
            clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
            clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)

            # The most recent 16+2+1 history frames supply multi-scale context.
            clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
            clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)

            generated_latents = sample_hunyuan(
                transformer=transformer,
                sampler='unipc',
                width=width,
                height=height,
                frames=latent_window_size * 4 - 3,
                real_guidance_scale=cfg,
                distilled_guidance_scale=gs,
                guidance_rescale=rs,
                # shift=3.0,
                num_inference_steps=steps,
                generator=rnd,
                prompt_embeds=llama_vec,
                prompt_embeds_mask=llama_attention_mask,
                prompt_poolers=clip_l_pooler,
                negative_prompt_embeds=llama_vec_n,
                negative_prompt_embeds_mask=llama_attention_mask_n,
                negative_prompt_poolers=clip_l_pooler_n,
                device=gpu,
                dtype=torch.bfloat16,
                image_embeddings=image_encoder_last_hidden_state,
                latent_indices=latent_indices,
                clean_latents=clean_latents,
                clean_latent_indices=clean_latent_indices,
                clean_latents_2x=clean_latents_2x,
                clean_latent_2x_indices=clean_latent_2x_indices,
                clean_latents_4x=clean_latents_4x,
                clean_latent_4x_indices=clean_latent_4x_indices,
                callback=callback,
            )

            total_generated_latent_frames += int(generated_latents.shape[2])
            history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)

            if not high_vram:
                offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
                load_model_as_complete(vae, target_device=gpu)

            real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]

            # Decode only the new tail and soft-blend it onto the already
            # decoded pixels, instead of re-decoding the whole video.
            if history_pixels is None:
                history_pixels = vae_decode(real_history_latents, vae).cpu()
            else:
                section_latent_frames = latent_window_size * 2
                overlapped_frames = latent_window_size * 4 - 3

                current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
                history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)

            if not high_vram:
                unload_complete_models()

            # One MP4 per section so the UI can show partial results early.
            output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')

            save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)

            print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')

            stream.output_queue.push(('file', output_filename))
    except:
        # Bare except is deliberate: the cancellation path raises
        # KeyboardInterrupt (a BaseException) from the sampler callback
        # and must be caught here as well as ordinary errors.
        traceback.print_exc()

        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

    # Always signal completion so the UI generator loop terminates.
    stream.output_queue.push(('end', None))
    return
343
def get_duration(input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
    """Return the GPU-time budget (seconds) for the @spaces.GPU decorator.

    Budgets 60 seconds of GPU time per second of requested video; every
    other parameter exists only to mirror the signature of `process`.
    """
    seconds_per_video_second = 60
    return seconds_per_video_second * total_second_length
346
@spaces.GPU(duration=get_duration)
def process(input_image, prompt,
            t2v=False,
            n_prompt="",
            seed=31337,
            total_second_length=5,
            latent_window_size=9,
            steps=25,
            cfg=1.0,
            gs=10.0,
            rs=0.0,
            gpu_memory_preservation=6,
            use_teacache=True,
            mp4_crf=16
            ):
    """Start-button handler: launch `worker` on a thread and stream UI updates.

    Yields 6-tuples matching the outputs wired up below:
    (result_video, preview_image, progress_desc, progress_bar,
     start_button_update, end_button_update).
    """
    global stream

    # assert input_image is not None, 'No input image!'
    if t2v:
        default_height, default_width = 640, 640
        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
        print("No input image provided. Using a blank white image.")
    else:
        # gr.ImageEditor delivers a dict; its "composite" entry is the RGBA
        # result of the drawing layers.  Flatten it onto a white background.
        composite_rgba_uint8 = input_image["composite"]

        # rgb_uint8 will be (H, W, 3), dtype uint8
        rgb_uint8 = composite_rgba_uint8[:, :, :3]
        # mask_uint8 will be (H, W), dtype uint8
        mask_uint8 = composite_rgba_uint8[:, :, 3]

        # Create background
        h, w = rgb_uint8.shape[:2]
        # White background, (H, W, 3), dtype uint8
        background_uint8 = np.full((h, w, 3), 255, dtype=np.uint8)

        # Normalize mask to range [0.0, 1.0].
        alpha_normalized_float32 = mask_uint8.astype(np.float32) / 255.0

        # Expand alpha to 3 channels to match RGB images for broadcasting.
        # alpha_mask_float32 will have shape (H, W, 3)
        alpha_mask_float32 = np.stack([alpha_normalized_float32] * 3, axis=2)

        # alpha blending
        blended_image_float32 = rgb_uint8.astype(np.float32) * alpha_mask_float32 + \
                                background_uint8.astype(np.float32) * (1.0 - alpha_mask_float32)

        input_image = np.clip(blended_image_float32, 0, 255).astype(np.uint8)

    # First update: clear outputs, disable Start, enable End.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    # Fresh stream per run so stale events from a previous job are dropped.
    stream = AsyncStream()

    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)

    output_filename = None

    # Relay worker events to the UI until the worker signals 'end'.
    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'progress':
            preview, desc, html = data
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'end':
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
418
def end_process():
    """End-button handler: request cancellation of the in-flight job.

    The worker checks stream.input_queue between sections and inside the
    per-step sampling callback, and aborts when it sees 'end'.
    """
    stream.input_queue.push('end')
-
421
-
422
quick_prompts = [
    'The girl dances gracefully, with clear movements, full of charm.',
    'A character doing some simple body movements.',
]
# gr.Dataset expects one row per sample, each row a list of component values.
quick_prompts = [[x] for x in quick_prompts]


# ---- UI layout and event wiring ----
css = make_progress_bar_css()
block = gr.Blocks(css=css).queue()
with block:
    gr.Markdown('# FramePack Essentials | Experimentation in Progress')
    gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
    """)
    with gr.Row():
        with gr.Column():
            input_image = gr.ImageEditor(type="numpy", label="Image", height=320, brush=gr.Brush(colors=["#ffffff"]))
            prompt = gr.Textbox(label="Prompt", value='')
            t2v = gr.Checkbox(label="do text-to-video", value=False)
            example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
            example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)

            with gr.Row():
                start_button = gr.Button(value="Start Generation")
                end_button = gr.Button(value="End Generation", interactive=False)

            total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=5, value=2, step=0.1)
            with gr.Group():
                with gr.Accordion("Advanced settings", open=False):
                    use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')

                    n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False)  # Not used
                    seed = gr.Number(label="Seed", value=31337, precision=0)

                    latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False)  # Should not change
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')

                    cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False)  # Should not change
                    gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
                    rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)  # Should not change

                    gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")

                    mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")

        with gr.Column():
            preview_image = gr.Image(label="Next Latents", height=200, visible=False)
            result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
            progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
            progress_bar = gr.HTML('', elem_classes='no-generating-animation')

    gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')

    # Input order must match the positional signature of `process`.
    ips = [input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
    start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
    end_button.click(fn=end_process)

    # gr.Examples(
    #     examples,
    #     inputs=[input_image, prompt],
    #     outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
    #     fn=generate_examples,
    #     cache_examples=True
    # )


block.launch(ssr_mode=False)
 
1
from diffusers_helper.hf_login import login

import os

# Keep the Hugging Face cache next to the app so downloaded weights persist
# with the Space instead of the default per-user cache directory.
os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))

import gradio as gr
import torch
import traceback
import einops
import safetensors.torch as sf
import numpy as np
import math
import spaces

from PIL import Image
from diffusers import AutoencoderKLHunyuanVideo
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
from diffusers_helper.thread_utils import AsyncStream, async_run
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
from transformers import SiglipImageProcessor, SiglipVisionModel
from diffusers_helper.clip_vision import hf_clip_vision_encode
from diffusers_helper.bucket_tools import find_nearest_bucket


# With > 80 GB free VRAM every model stays resident on the GPU; otherwise
# models are swapped on/off the device per pipeline stage (see `worker`).
free_mem_gb = get_cuda_free_memory_gb(gpu)
high_vram = free_mem_gb > 80

print(f'Free VRAM {free_mem_gb} GB')
print(f'High-VRAM Mode: {high_vram}')

# This commit switches the HunyuanVideo source repo from
# hunyuanvideo-community/HunyuanVideo to Fabrice-TIERCELIN/HunyuanVideo.
# Everything is materialised on CPU first; device placement happens below.
text_encoder = LlamaModel.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
text_encoder_2 = CLIPTextModel.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
tokenizer = LlamaTokenizerFast.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='tokenizer')
tokenizer_2 = CLIPTokenizer.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='tokenizer_2')
vae = AutoencoderKLHunyuanVideo.from_pretrained("Fabrice-TIERCELIN/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()

# SigLIP vision tower used for the CLIP-Vision image conditioning.
feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()

# quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()

# Inference only: eval mode everywhere, gradients disabled below.
vae.eval()
text_encoder.eval()
text_encoder_2.eval()
image_encoder.eval()
transformer.eval()

if not high_vram:
    # Trade VAE speed for lower peak memory on low-VRAM machines.
    vae.enable_slicing()
    vae.enable_tiling()

transformer.high_quality_fp32_output_for_inference = True
print('transformer.high_quality_fp32_output_for_inference = True')

transformer.to(dtype=torch.bfloat16)
vae.to(dtype=torch.float16)
image_encoder.to(dtype=torch.float16)
text_encoder.to(dtype=torch.float16)
text_encoder_2.to(dtype=torch.float16)

vae.requires_grad_(False)
text_encoder.requires_grad_(False)
text_encoder_2.requires_grad_(False)
image_encoder.requires_grad_(False)
transformer.requires_grad_(False)

if not high_vram:
    # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
    DynamicSwapInstaller.install_model(transformer, device=gpu)
    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
else:
    text_encoder.to(gpu)
    text_encoder_2.to(gpu)
    image_encoder.to(gpu)
    vae.to(gpu)
    transformer.to(gpu)

# Cross-thread queue pair linking the UI generators and the worker thread.
stream = AsyncStream()

outputs_folder = './outputs/'
os.makedirs(outputs_folder, exist_ok=True)

# (image, prompt) pairs for the gr.Examples widget (currently commented out
# at the bottom of the file).
examples = [
    ["img_examples/1.png", "The girl dances gracefully, with clear movements, full of charm.",],
    ["img_examples/2.jpg", "The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair."],
    ["img_examples/3.png", "The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements."],
]
98
def generate_examples(input_image, prompt):
    """Run one generation with every option pinned to its UI default.

    Mirrors `process` (minus the ImageEditor alpha-flattening) so that
    cached example runs behave like a plain button click.  Yields the
    same 6-tuple the UI expects: (video, preview, desc, html,
    start_button_update, end_button_update).
    """
    global stream

    # Fixed defaults matching the sliders/checkboxes in the UI.
    t2v = False
    n_prompt = ""
    seed = 31337
    total_second_length = 5
    latent_window_size = 9
    steps = 25
    cfg = 1.0
    gs = 10.0
    rs = 0.0
    gpu_memory_preservation = 6
    use_teacache = True
    mp4_crf = 16

    # assert input_image is not None, 'No input image!'
    if t2v:
        # Text-to-video: substitute a blank white canvas for the image.
        default_height, default_width = 640, 640
        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
        print("No input image provided. Using a blank white image.")

    # First update: clear outputs, disable Start, enable End.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    stream = AsyncStream()
    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)

    output_filename = None

    # Relay worker events to the UI until the worker signals 'end'.
    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'progress':
            preview, desc, html = data
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
        elif flag == 'end':
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
146
+ @torch.no_grad()
147
+ def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
148
+ total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
149
+ total_latent_sections = int(max(round(total_latent_sections), 1))
150
+
151
+ job_id = generate_timestamp()
152
+
153
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
154
+
155
+ try:
156
+ # Clean GPU
157
+ if not high_vram:
158
+ unload_complete_models(
159
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
160
+ )
161
+
162
+ # Text encoding
163
+
164
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
165
+
166
+ if not high_vram:
167
+ fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
168
+ load_model_as_complete(text_encoder_2, target_device=gpu)
169
+
170
+ llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
171
+
172
+ if cfg == 1:
173
+ llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
174
+ else:
175
+ llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
176
+
177
+ llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
178
+ llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
179
+
180
+ # Processing input image
181
+
182
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
183
+
184
+ H, W, C = input_image.shape
185
+ height, width = find_nearest_bucket(H, W, resolution=640)
186
+ input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
187
+
188
+ Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
189
+
190
+ input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
191
+ input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
192
+
193
+ # VAE encoding
194
+
195
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
196
+
197
+ if not high_vram:
198
+ load_model_as_complete(vae, target_device=gpu)
199
+
200
+ start_latent = vae_encode(input_image_pt, vae)
201
+
202
+ # CLIP Vision
203
+
204
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
205
+
206
+ if not high_vram:
207
+ load_model_as_complete(image_encoder, target_device=gpu)
208
+
209
+ image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
210
+ image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
211
+
212
+ # Dtype
213
+
214
+ llama_vec = llama_vec.to(transformer.dtype)
215
+ llama_vec_n = llama_vec_n.to(transformer.dtype)
216
+ clip_l_pooler = clip_l_pooler.to(transformer.dtype)
217
+ clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
218
+ image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
219
+
220
+ # Sampling
221
+
222
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
223
+
224
+ rnd = torch.Generator("cpu").manual_seed(seed)
225
+
226
+ history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
227
+ history_pixels = None
228
+
229
+ history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
230
+ total_generated_latent_frames = 1
231
+
232
+ for section_index in range(total_latent_sections):
233
+ if stream.input_queue.top() == 'end':
234
+ stream.output_queue.push(('end', None))
235
+ return
236
+
237
+ print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
238
+
239
+ if not high_vram:
240
+ unload_complete_models()
241
+ move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
242
+
243
+ if use_teacache:
244
+ transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
245
+ else:
246
+ transformer.initialize_teacache(enable_teacache=False)
247
+
248
+ def callback(d):
249
+ preview = d['denoised']
250
+ preview = vae_decode_fake(preview)
251
+
252
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
253
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
254
+
255
+ if stream.input_queue.top() == 'end':
256
+ stream.output_queue.push(('end', None))
257
+ raise KeyboardInterrupt('User ends the task.')
258
+
259
+ current_step = d['i'] + 1
260
+ percentage = int(100.0 * current_step / steps)
261
+ hint = f'Sampling {current_step}/{steps}'
262
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
263
+ stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
264
+ return
265
+
266
+ indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
267
+ clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
268
+ clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
269
+
270
+ clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
271
+ clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
272
+
273
+ generated_latents = sample_hunyuan(
274
+ transformer=transformer,
275
+ sampler='unipc',
276
+ width=width,
277
+ height=height,
278
+ frames=latent_window_size * 4 - 3,
279
+ real_guidance_scale=cfg,
280
+ distilled_guidance_scale=gs,
281
+ guidance_rescale=rs,
282
+ # shift=3.0,
283
+ num_inference_steps=steps,
284
+ generator=rnd,
285
+ prompt_embeds=llama_vec,
286
+ prompt_embeds_mask=llama_attention_mask,
287
+ prompt_poolers=clip_l_pooler,
288
+ negative_prompt_embeds=llama_vec_n,
289
+ negative_prompt_embeds_mask=llama_attention_mask_n,
290
+ negative_prompt_poolers=clip_l_pooler_n,
291
+ device=gpu,
292
+ dtype=torch.bfloat16,
293
+ image_embeddings=image_encoder_last_hidden_state,
294
+ latent_indices=latent_indices,
295
+ clean_latents=clean_latents,
296
+ clean_latent_indices=clean_latent_indices,
297
+ clean_latents_2x=clean_latents_2x,
298
+ clean_latent_2x_indices=clean_latent_2x_indices,
299
+ clean_latents_4x=clean_latents_4x,
300
+ clean_latent_4x_indices=clean_latent_4x_indices,
301
+ callback=callback,
302
+ )
303
+
304
+ total_generated_latent_frames += int(generated_latents.shape[2])
305
+ history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
306
+
307
+ if not high_vram:
308
+ offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
309
+ load_model_as_complete(vae, target_device=gpu)
310
+
311
+ real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
312
+
313
+ if history_pixels is None:
314
+ history_pixels = vae_decode(real_history_latents, vae).cpu()
315
+ else:
316
+ section_latent_frames = latent_window_size * 2
317
+ overlapped_frames = latent_window_size * 4 - 3
318
+
319
+ current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
320
+ history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
321
+
322
+ if not high_vram:
323
+ unload_complete_models()
324
+
325
+ output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
326
+
327
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
328
+
329
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
330
+
331
+ stream.output_queue.push(('file', output_filename))
332
+ except:
333
+ traceback.print_exc()
334
+
335
+ if not high_vram:
336
+ unload_complete_models(
337
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
338
+ )
339
+
340
+ stream.output_queue.push(('end', None))
341
+ return
342
+
343
def get_duration(input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
    """Return the GPU time budget (seconds) for the @spaces.GPU decorator.

    Only ``total_second_length`` matters: each requested second of video is
    budgeted as one minute of GPU time. The other parameters are accepted
    solely so the signature mirrors ``process`` and can be passed directly
    as ``duration=get_duration``.
    """
    gpu_seconds_per_video_second = 60
    return total_second_length * gpu_seconds_per_video_second
+
346
@spaces.GPU(duration=get_duration)
def process(input_image, prompt,
            t2v=False,
            n_prompt="",
            seed=31337,
            total_second_length=5,
            latent_window_size=9,
            steps=25,
            cfg=1.0,
            gs=10.0,
            rs=0.0,
            gpu_memory_preservation=6,
            use_teacache=True,
            mp4_crf=16
            ):
    """Gradio callback: prepare the input image, launch the background worker,
    and stream its progress back to the UI.

    Yields 6-tuples matching the wired outputs:
    (result_video, preview_image, progress_desc, progress_bar,
     start_button update, end_button update).

    ``input_image`` is a gr.ImageEditor value (a dict whose "composite" entry
    is an RGBA uint8 array) unless ``t2v`` is True, in which case it is
    ignored and replaced by a white canvas. Remaining args mirror the UI
    controls and are forwarded verbatim to ``worker``.
    """
    global stream

    if t2v:
        # Text-to-video mode: synthesize a plain white canvas so the rest of
        # the pipeline can treat this exactly like image-to-video.
        default_height, default_width = 640, 640
        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
        print("No input image provided. Using a blank white image.")
    else:
        # Bug fix: the original guard (`assert input_image is not None`) was
        # commented out, so starting without an image crashed with an opaque
        # TypeError on the subscript below. Surface a clear UI error instead.
        if input_image is None or input_image.get("composite") is None:
            raise gr.Error('No input image provided. Upload an image or enable "do text-to-video".')

        composite_rgba_uint8 = input_image["composite"]

        # rgb_uint8 will be (H, W, 3), dtype uint8
        rgb_uint8 = composite_rgba_uint8[:, :, :3]

        if composite_rgba_uint8.shape[2] < 4:
            # Robustness: no alpha channel to blend against (plain RGB
            # composite); previously this crashed on the channel index.
            input_image = rgb_uint8
        else:
            # mask_uint8 will be (H, W), dtype uint8
            mask_uint8 = composite_rgba_uint8[:, :, 3]

            # White background, (H, W, 3), dtype uint8
            h, w = rgb_uint8.shape[:2]
            background_uint8 = np.full((h, w, 3), 255, dtype=np.uint8)

            # Normalize mask to range [0.0, 1.0] and expand alpha to 3 channels
            # so it broadcasts against the RGB arrays.
            alpha_normalized_float32 = mask_uint8.astype(np.float32) / 255.0
            alpha_mask_float32 = np.stack([alpha_normalized_float32] * 3, axis=2)

            # Alpha-blend the edited image over the white background.
            blended_image_float32 = rgb_uint8.astype(np.float32) * alpha_mask_float32 + \
                                    background_uint8.astype(np.float32) * (1.0 - alpha_mask_float32)

            input_image = np.clip(blended_image_float32, 0, 255).astype(np.uint8)

    # Immediately flip the buttons: disable Start, enable End.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    stream = AsyncStream()

    async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)

    output_filename = None

    # Pump worker events until it signals completion.
    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            # A (partial or final) video file is ready to show.
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'progress':
            # Latent preview plus textual/HTML progress indicators.
            preview, desc, html = data
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'end':
            # Worker finished (or was cancelled): hide preview, restore buttons.
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
+
417
+
418
def end_process():
    """Request cancellation of the running job by posting the 'end' sentinel
    onto the worker's input queue."""
    input_queue = stream.input_queue
    input_queue.push('end')
+
421
+
422
# Preset prompts shown in the UI; gr.Dataset expects one row ([prompt]) per sample.
quick_prompts = [
    [text]
    for text in (
        'The girl dances gracefully, with clear movements, full of charm.',
        'A character doing some simple body movements.',
    )
]
+
428
+
429
# ---------------------------------------------------------------------------
# Gradio UI definition and app entry point.
# ---------------------------------------------------------------------------
css = make_progress_bar_css()
block = gr.Blocks(css=css).queue()
with block:
    gr.Markdown('# FramePack Essentials | Experimentation in Progress')
    gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
    """)
    with gr.Row():
        with gr.Column():
            # Input side: image editor (white brush for masking), prompt, presets.
            input_image = gr.ImageEditor(type="numpy", label="Image", height=320, brush=gr.Brush(colors=["#ffffff"]))
            prompt = gr.Textbox(label="Prompt", value='')
            t2v = gr.Checkbox(label="do text-to-video", value=False)
            example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
            example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)

            with gr.Row():
                start_button = gr.Button(value="Start Generation")
                end_button = gr.Button(value="End Generation", interactive=False)

            total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=5, value=2, step=0.1)
            with gr.Group():
                with gr.Accordion("Advanced settings", open=False):
                    use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')

                    n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False)  # Not used
                    seed = gr.Number(label="Seed", value=31337, precision=0)

                    latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False)  # Should not change
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')

                    cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False)  # Should not change
                    gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
                    rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)  # Should not change

                    gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")

                    mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")

        with gr.Column():
            # Output side: latent preview, finished video, progress readouts.
            preview_image = gr.Image(label="Next Latents", height=200, visible=False)
            result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
            progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
            progress_bar = gr.HTML('', elem_classes='no-generating-animation')

    gr.HTML('<div style="text-align:center; margin-top:20px;">Share your results and find ideas at the <a href="https://x.com/search?q=framepack&f=live" target="_blank">FramePack Twitter (X) thread</a></div>')

    # NOTE: order must match the `process` signature exactly.
    ips = [input_image, prompt, t2v, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
    start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
    end_button.click(fn=end_process)

    # gr.Examples(
    #     examples,
    #     inputs=[input_image, prompt],
    #     outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
    #     fn=generate_examples,
    #     cache_examples=True
    # )


block.launch(ssr_mode=False)