from diffusers_helper.hf_login import login

import os

os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))

import spaces
import gradio as gr
import torch
import traceback
import einops
import safetensors.torch as sf
import numpy as np
import argparse
import random
import math
import pathlib
import tempfile
import shutil
import subprocess

import decord
import imageio_ffmpeg
from tqdm import tqdm
from datetime import datetime
from PIL import Image
from diffusers import AutoencoderKLHunyuanVideo
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, HunyuanVideoTransformer3DModel, HunyuanVideoPipeline
from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer, SiglipImageProcessor, SiglipVisionModel
from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
from diffusers_helper.thread_utils import AsyncStream, async_run
from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
from diffusers_helper.clip_vision import hf_clip_vision_encode
from diffusers_helper.bucket_tools import find_nearest_bucket

if torch.cuda.device_count() > 0:
    free_mem_gb = get_cuda_free_memory_gb(gpu)
else:
    # No CUDA device: report zero free VRAM so the low-VRAM code path is used
    free_mem_gb = 0

high_vram = free_mem_gb > 60

print(f'Free VRAM {free_mem_gb} GB')
print(f'High-VRAM Mode: {high_vram}')

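# Model stack: the HunyuanVideo text encoders (Llama + CLIP), their tokenizers
# and VAE, the SigLIP vision encoder used for image conditioning, and the
# FramePack F1 packed video transformer. Everything is loaded to CPU first in
# half precision; GPU placement is decided below based on available VRAM.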
text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()

feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()

transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()

vae.eval()
text_encoder.eval()
text_encoder_2.eval()
image_encoder.eval()
transformer.eval()

if not high_vram:
    vae.enable_slicing()
    vae.enable_tiling()

transformer.high_quality_fp32_output_for_inference = True
print('transformer.high_quality_fp32_output_for_inference = True')

transformer.to(dtype=torch.bfloat16)
vae.to(dtype=torch.float16)
image_encoder.to(dtype=torch.float16)
text_encoder.to(dtype=torch.float16)
text_encoder_2.to(dtype=torch.float16)

vae.requires_grad_(False)
text_encoder.requires_grad_(False)
text_encoder_2.requires_grad_(False)
image_encoder.requires_grad_(False)
transformer.requires_grad_(False)

if not high_vram:
    DynamicSwapInstaller.install_model(transformer, device=gpu)
    DynamicSwapInstaller.install_model(text_encoder, device=gpu)
else:
    text_encoder.to(gpu)
    text_encoder_2.to(gpu)
    image_encoder.to(gpu)
    vae.to(gpu)
    transformer.to(gpu)

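# In low-VRAM mode, DynamicSwapInstaller streams the two largest models (the
# transformer and the Llama text encoder) between CPU and GPU on demand, while
# the smaller models are loaded and unloaded around each pipeline stage via
# load_model_as_complete / unload_complete_models. In high-VRAM mode, all five
# models simply stay resident on the GPU.
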
stream = AsyncStream()

outputs_folder = './outputs/'
os.makedirs(outputs_folder, exist_ok=True)

# One-shot debug overrides set by the "Debug" accordion in the UI
input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None

@spaces.GPU()
@torch.no_grad()
def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
    """
    Encode a video into latent representations using the VAE.

    Args:
        video_path: Path to the input video file.
        resolution: Bucket resolution used to pick the target size when resizing.
        no_resize: If True, keep the native video resolution.
        vae: AutoencoderKLHunyuanVideo model.
        vae_batch_size: Number of frames to encode per batch.
        device: Device for computation (e.g., "cuda").
        width, height: Optional target resolution; defaults to the native size.

    Returns:
        start_latent: Latent of the first frame (for compatibility with original code).
        input_image_np: First frame as numpy array (for CLIP vision encoding).
        history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
        fps: Frames per second of the input video.
        target_height, target_width: Resolution the frames were actually encoded at.
        input_video_pixels: Preprocessed input frames as a BCTHW tensor on CPU.
    """
    video_path = str(pathlib.Path(video_path).resolve())
    print(f"Processing video: {video_path}")

    if device == "cuda" and not torch.cuda.is_available():
        print("CUDA is not available, falling back to CPU")
        device = "cpu"

    try:
        print("Initializing VideoReader...")
        vr = decord.VideoReader(video_path)
        fps = vr.get_avg_fps()
        num_real_frames = len(vr)
        print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")

        latent_size_factor = 4
        num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
        if num_frames != num_real_frames:
            print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
        num_real_frames = num_frames
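        # The frame count is truncated to a multiple of 4 because the Hunyuan
        # video VAE compresses time by a factor of 4; a non-multiple count would
        # leave a partial latent frame and break shape bookkeeping downstream.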

        print("Reading video frames...")
        frames = vr.get_batch(range(num_real_frames)).asnumpy()  # Shape: (num_frames, height, width, channels)
        print(f"Frames read: {frames.shape}")

        native_height, native_width = frames.shape[1], frames.shape[2]
        print(f"Native video resolution: {native_width}x{native_height}")

        target_height = native_height if height is None else height
        target_width = native_width if width is None else width

        if not no_resize:
            target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
            print(f"Adjusted resolution: {target_width}x{target_height}")
        else:
            print(f"Using native resolution without resizing: {target_width}x{target_height}")

        processed_frames = []
        for i, frame in enumerate(frames):
            frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
            processed_frames.append(frame_np)
        processed_frames = np.stack(processed_frames)
        print(f"Frames preprocessed: {processed_frames.shape}")

        # The first frame doubles as the conditioning image for CLIP vision encoding
        input_image_np = processed_frames[0]

        print("Converting frames to tensor...")
        frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1  # Normalize uint8 [0, 255] to [-1, 1]
        frames_pt = frames_pt.permute(0, 3, 1, 2)     # (T, H, W, C) -> (T, C, H, W)
        frames_pt = frames_pt.unsqueeze(0)            # -> (1, T, C, H, W)
        frames_pt = frames_pt.permute(0, 2, 1, 3, 4)  # -> (1, C, T, H, W), i.e. BCTHW
        print(f"Tensor shape: {frames_pt.shape}")

        # Keep a CPU copy of the pixels for the caller
        input_video_pixels = frames_pt.cpu()

        print(f"Moving tensor to device: {device}")
        frames_pt = frames_pt.to(device)
        print("Tensor moved to device")

        print(f"Moving VAE to device: {device}")
        vae.to(device)
        print("VAE moved to device")
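        # BCTHW (batch, channels, time, height, width) is the layout the helper
        # utilities assume throughout (vae_encode, save_bcthw_as_mp4,
        # soft_append_bcthw), so pixel tensors are kept in this shape.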

        print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
        latents = []
        vae.eval()
        with torch.no_grad():
            for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
                batch = frames_pt[:, :, i:i + vae_batch_size]  # Slice along the time axis
                try:
                    batch_latent = vae_encode(batch, vae)
                    if device == "cuda":
                        torch.cuda.synchronize()  # Wait for the encode to finish before the next batch
                    latents.append(batch_latent)
                except RuntimeError as e:
                    print(f"Error during VAE encoding: {str(e)}")
                    if device == "cuda" and "out of memory" in str(e).lower():
                        print("CUDA out of memory, try reducing vae_batch_size or using CPU")
                    raise

        print("Concatenating latents...")
        history_latents = torch.cat(latents, dim=2)  # Concatenate along the time axis
        print(f"History latents shape: {history_latents.shape}")

        start_latent = history_latents[:, :, :1]
        print(f"Start latent shape: {start_latent.shape}")

        if device == "cuda":
            vae.to(cpu)
            torch.cuda.empty_cache()
            print("VAE moved back to CPU, CUDA cache cleared")

        return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels

    except Exception as e:
        print(f"Error in video_encode: {str(e)}")
        raise


def set_mp4_comments_imageio_ffmpeg(input_file, comments):
    """Write the given text into the mp4 'comment' metadata tag in place."""
    try:
        # Use the ffmpeg binary bundled with imageio-ffmpeg
        ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()

        if not os.path.exists(input_file):
            print(f"Error: Input file {input_file} does not exist")
            return False

        # ffmpeg cannot edit a file in place, so write to a temporary file first
        temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name

        command = [
            ffmpeg_path,
            '-i', input_file,
            '-metadata', f'comment={comments}',
            '-c:v', 'copy',  # Copy the video stream without re-encoding
            '-c:a', 'copy',  # Copy the audio stream without re-encoding
            '-y',            # Overwrite the temporary output if it exists
            temp_file
        ]

        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        if result.returncode == 0:
            # Replace the original file with the tagged copy
            shutil.move(temp_file, input_file)
            print(f"Successfully added comments to {input_file}")
            return True
        else:
            if os.path.exists(temp_file):
                os.remove(temp_file)
            print(f"Error: FFmpeg failed with message:\n{result.stderr}")
            return False

    except Exception as e:
        if 'temp_file' in locals() and os.path.exists(temp_file):
            os.remove(temp_file)
        print(f"Error saving prompt to video metadata, ffmpeg may be required: {str(e)}")
        return False

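# Note on set_mp4_comments_imageio_ffmpeg: since both streams are stream-copied
# ('-c:v copy', '-c:a copy'), ffmpeg rewrites only the container metadata, so
# tagging is fast and lossless regardless of video length.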

@spaces.GPU()
@torch.no_grad()
def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))

    try:
        # Clean GPU before starting
        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

        # Text encoding
        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))

        if not high_vram:
            fake_diffusers_current_device(text_encoder, gpu)
            load_model_as_complete(text_encoder_2, target_device=gpu)

        llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        if cfg == 1:
            llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
        else:
            llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
        llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
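        # With cfg == 1, classifier-free guidance is effectively off, so the
        # negative branch is never evaluated and zero embeddings suffice.
        # crop_or_pad_yield_mask fixes the Llama hidden states to a 512-token
        # context and returns the matching attention mask.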

        # Encode the input video into VAE latents; this also returns the first
        # frame and the bucketed output resolution
        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))

        start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)

        # CLIP Vision
        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))

        if not high_vram:
            load_model_as_complete(image_encoder, target_device=gpu)

        image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
        image_encoder_last_hidden_state = image_encoder_output.last_hidden_state

        # Match the transformer dtype (bfloat16)
        llama_vec = llama_vec.to(transformer.dtype)
        llama_vec_n = llama_vec_n.to(transformer.dtype)
        clip_l_pooler = clip_l_pooler.to(transformer.dtype)
        clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
        image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)

        total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
        total_latent_sections = int(max(round(total_latent_sections), 1))

        for idx in range(batch):
            if batch > 1:
                print(f"Beginning video {idx + 1} of {batch} with seed {seed}")

            job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + f"_framepackf1-videoinput_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}"

            stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))

            rnd = torch.Generator("cpu").manual_seed(seed)

            # Seed the history with the encoded input video; newly generated
            # sections are appended after these latents
            history_latents = video_latents.cpu()
            total_generated_latent_frames = history_latents.shape[2]

            history_pixels = None
            previous_video = None

            for section_index in range(total_latent_sections):
                if stream.input_queue.top() == 'end':
                    stream.output_queue.push(('end', None))
                    return

                print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')

                if not high_vram:
                    unload_complete_models()
                    move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)

                if use_teacache:
                    transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
                else:
                    transformer.initialize_teacache(enable_teacache=False)

                def callback(d):
                    preview = d['denoised']
                    preview = vae_decode_fake(preview)

                    preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
                    preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')

                    if stream.input_queue.top() == 'end':
                        stream.output_queue.push(('end', None))
                        raise KeyboardInterrupt('User ends the task.')

                    current_step = d['i'] + 1
                    percentage = int(100.0 * current_step / steps)
                    hint = f'Sampling {current_step}/{steps}'
                    desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps):.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx + 1} of {batch}. The video is generating part {section_index + 1} of {total_latent_sections}...'
                    stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
                    return
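
                # The callback runs once per denoising step: vae_decode_fake is a
                # cheap approximate latent-to-RGB preview (not a full VAE decode),
                # and einops tiles the frames into a single image grid for the UI.
                # Raising KeyboardInterrupt is how a queued 'end' request aborts
                # sampling mid-section.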

                available_frames = history_latents.shape[2]
                max_pixel_frames = min(latent_window_size * 4 - 3, available_frames * 4)
                adjusted_latent_frames = max(1, (max_pixel_frames + 3) // 4)

                effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 0
                effective_clean_frames = min(effective_clean_frames, available_frames - 2) if available_frames > 2 else 0
                num_2x_frames = min(2, max(1, available_frames - effective_clean_frames - 1)) if available_frames > effective_clean_frames + 1 else 0
                num_4x_frames = min(16, max(1, available_frames - effective_clean_frames - num_2x_frames)) if available_frames > effective_clean_frames + num_2x_frames else 0

                total_context_frames = num_4x_frames + num_2x_frames + effective_clean_frames
                total_context_frames = min(total_context_frames, available_frames)

                indices = torch.arange(0, sum([1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames])).unsqueeze(0)
                clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split(
                    [1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames], dim=1
                )
                clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
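                # FramePack conditions each new window on history at three levels
                # of temporal compression: 1x "clean" frames (most recent, full
                # fidelity), then 2x and 4x compressed frames reaching further
                # back, plus the first-frame anchor. The index tensor lays these
                # out contiguously as [start, 4x, 2x, 1x, new window].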

                # Slice the most recent history latents and split them into the
                # 4x / 2x / 1x context groups; fall back to the first frames when
                # the history is too short
                fallback_frame_count = 2
                context_frames = history_latents[:, :, -total_context_frames:, :, :] if total_context_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
                if total_context_frames > 0:
                    split_sizes = [num_4x_frames, num_2x_frames, effective_clean_frames]
                    split_sizes = [s for s in split_sizes if s > 0]
                    if split_sizes:
                        splits = context_frames.split(split_sizes, dim=2)
                        split_idx = 0
                        clean_latents_4x = splits[split_idx] if num_4x_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
                        if clean_latents_4x.shape[2] < 2:  # Pad to at least 2 frames by repeating the last one
                            clean_latents_4x = torch.cat([clean_latents_4x, clean_latents_4x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
                        split_idx += 1 if num_4x_frames > 0 else 0
                        clean_latents_2x = splits[split_idx] if num_2x_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
                        if clean_latents_2x.shape[2] < 2:
                            clean_latents_2x = torch.cat([clean_latents_2x, clean_latents_2x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
                        split_idx += 1 if num_2x_frames > 0 else 0
                        clean_latents_1x = splits[split_idx] if effective_clean_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
                    else:
                        clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
                else:
                    clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]

                clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)

                max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)

                generated_latents = sample_hunyuan(
                    transformer=transformer,
                    sampler='unipc',
                    width=width,
                    height=height,
                    frames=max_frames,
                    real_guidance_scale=cfg,
                    distilled_guidance_scale=gs,
                    guidance_rescale=rs,
                    num_inference_steps=steps,
                    generator=rnd,
                    prompt_embeds=llama_vec,
                    prompt_embeds_mask=llama_attention_mask,
                    prompt_poolers=clip_l_pooler,
                    negative_prompt_embeds=llama_vec_n,
                    negative_prompt_embeds_mask=llama_attention_mask_n,
                    negative_prompt_poolers=clip_l_pooler_n,
                    device=gpu,
                    dtype=torch.bfloat16,
                    image_embeddings=image_encoder_last_hidden_state,
                    latent_indices=latent_indices,
                    clean_latents=clean_latents,
                    clean_latent_indices=clean_latent_indices,
                    clean_latents_2x=clean_latents_2x,
                    clean_latent_2x_indices=clean_latent_2x_indices,
                    clean_latents_4x=clean_latents_4x,
                    clean_latent_4x_indices=clean_latent_4x_indices,
                    callback=callback,
                )
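                # real_guidance_scale (cfg) is classic classifier-free guidance and
                # doubles the per-step cost when > 1; distilled_guidance_scale (gs)
                # steers the distilled model in a single pass; guidance_rescale (rs)
                # rescales the guided prediction to curb over-saturation.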

                total_generated_latent_frames += int(generated_latents.shape[2])
                history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)

                if not high_vram:
                    offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
                    load_model_as_complete(vae, target_device=gpu)

                real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]

                if history_pixels is None:
                    history_pixels = vae_decode(real_history_latents, vae).cpu()
                else:
                    section_latent_frames = latent_window_size * 2
                    overlapped_frames = min(latent_window_size * 4 - 3, history_pixels.shape[2])
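
                    # Re-decode only the tail of the latent history and cross-fade
                    # overlapped_frames pixels with what was already decoded, so
                    # section boundaries blend smoothly instead of popping.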
                    current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
                    history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)

                if not high_vram:
                    unload_complete_models()

                output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')

                save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
                print(f"Latest video saved: {output_filename}")

                set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}")
                print(f"Prompt saved to mp4 metadata comments: {output_filename}")

                # Clean up the partial video from the previous section
                if previous_video is not None and os.path.exists(previous_video):
                    try:
                        os.remove(previous_video)
                        print(f"Previous partial video deleted: {previous_video}")
                    except Exception as e:
                        print(f"Error deleting previous partial video {previous_video}: {e}")
                previous_video = output_filename

                print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')

                stream.output_queue.push(('file', output_filename))

            # Advance the seed so each video in the batch differs
            seed = (seed + 1) % np.iinfo(np.int32).max

    except Exception:
        traceback.print_exc()

        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

    stream.output_queue.push(('end', None))
    return


def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    global total_second_length_debug_value
    if total_second_length_debug_value is not None:
        return min(total_second_length_debug_value * 60 * 10, 600)
    return total_second_length * 60 * 10
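
# Budgets ZeroGPU time for process_video: @spaces.GPU accepts a callable that is
# evaluated with the same arguments as the decorated function. The heuristic here
# is 10 minutes of GPU time per requested second of video, capped at 600 s when
# the debug length override is active.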

@spaces.GPU(duration=get_duration_video)
def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
    global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value

    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        # This is a generator, so the outputs must be yielded, not returned
        yield None, None, '', '', gr.update(interactive=True), gr.update(interactive=False)
        return

    # Apply one-shot debug overrides, then clear them
    if input_video_debug_value is not None:
        input_video = input_video_debug_value
        input_video_debug_value = None

    if prompt_debug_value is not None:
        prompt = prompt_debug_value
        prompt_debug_value = None

    if total_second_length_debug_value is not None:
        total_second_length = total_second_length_debug_value
        total_second_length_debug_value = None

    if randomize_seed:
        seed = random.randint(0, np.iinfo(np.int32).max)

    assert input_video is not None, 'No input video!'

    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    # Unresized or higher-resolution inputs need more VRAM, so fall back to the
    # low-VRAM path even on large GPUs
    if high_vram and (no_resize or resolution > 640):
        print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
        high_vram = False
        vae.enable_slicing()
        vae.enable_tiling()
        DynamicSwapInstaller.install_model(transformer, device=gpu)
        DynamicSwapInstaller.install_model(text_encoder, device=gpu)

    # Real CFG and distilled CFG are mutually exclusive; prefer real CFG when set
    if cfg > 1:
        gs = 1

    stream = AsyncStream()

    # Run the worker in a background thread; results arrive on stream.output_queue
    async_run(worker_video, input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)

    output_filename = None
    desc = ''

    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'progress':
            preview, desc, html = data
            yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'end':
            yield output_filename, gr.update(visible=False), desc + ' Video complete.', '', gr.update(interactive=True), gr.update(interactive=False)
            break


def end_process():
    stream.input_queue.push('end')
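
# worker_video and process_video form a producer/consumer pair: the worker pushes
# ('progress' | 'file' | 'end') messages onto the AsyncStream output queue, the
# process_video generator turns them into Gradio UI updates, and end_process
# pushes 'end' onto the input queue, which the worker and its sampling callback
# poll to abort generation.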

css = make_progress_bar_css()
block = gr.Blocks(css=css).queue()
with block:
    if torch.cuda.device_count() == 0:
        with gr.Row():
            gr.HTML("""
            <p style="background-color: red;"><big><big><big><b>⚠️To use FramePack, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>

            You can't use FramePack directly here because this space runs on a CPU, which is not enough for FramePack. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
            </big></big></big></p>
            """)

    gr.Markdown('# Framepack F1 with Image Input or with Video Input (Video Extension)')
    gr.Markdown(f"""### Video diffusion, but feels like image diffusion
    *FramePack F1 - a FramePack model that only predicts future frames from history frames*
    ### *beta* FramePack Fill 🖋️- draw a mask over the input image to inpaint the video output
    adapted from the official code repo [FramePack](https://github.com/lllyasviel/FramePack) by [lllyasviel](https://huggingface.co/lllyasviel/FramePack_F1_I2V_HY_20250503) and [FramePack Studio](https://github.com/colinurbs/FramePack-Studio) 🙌🏻
    """)
    with gr.Row():
        with gr.Column():
            input_video = gr.Video(sources='upload', label="Input Video", height=320)
            prompt = gr.Textbox(label="Prompt", value='')

            with gr.Row():
                start_button = gr.Button(value="Start Generation", variant="primary")
                end_button = gr.Button(value="End Generation", variant="stop", interactive=False)

            with gr.Accordion("Advanced settings", open=False):
                with gr.Row():
                    use_teacache = gr.Checkbox(label='Use TeaCache', value=False, info='Faster speed, but often makes hands and fingers slightly worse.')
                    no_resize = gr.Checkbox(label='Force Original Video Resolution (No Resizing)', value=False, info='Might run out of VRAM (720p requires > 24GB VRAM).')

                randomize_seed = gr.Checkbox(label='Randomize seed', value=True, info='If checked, the seed is always different')
                seed = gr.Slider(label="Seed", minimum=0, maximum=np.iinfo(np.int32).max, step=1, randomize=True)

                batch = gr.Slider(label="Batch Size (Number of Videos)", minimum=1, maximum=1000, value=1, step=1, info='Generate multiple videos, each with a different seed.')

                resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, visible=False)

                total_second_length = gr.Slider(label="Additional Video Length to Generate (Seconds)", minimum=1, maximum=120, value=5, step=0.1)

                gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=3.0, step=0.01, info='Prompt adherence at the cost of fewer details from the input video, but to a lesser extent than Context Frames.')
                cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=True, info='Use this instead of Distilled CFG for more detail/control and Negative Prompt support (set Distilled CFG Scale to 1). Doubles render time.')
                rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False)

                n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=True, info='Requires normal (undistilled) CFG: set Distilled CFG Scale to 1 and CFG Scale > 1.')
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Increase for more quality, especially if using high non-distilled CFG.')

                num_clean_frames = gr.Slider(label="Number of Context Frames", minimum=2, maximum=10, value=5, step=1, info="Retains more video details but increases memory use. Reduce to 2 if you run into memory issues.")

                default_vae = 32
                if high_vram:
                    default_vae = 128
                elif free_mem_gb >= 20:
                    default_vae = 64

                vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Reduce if running out of memory. Increase for better quality frames during fast motion.")

                latent_window_size = gr.Slider(label="Latent Window Size", minimum=9, maximum=33, value=9, step=1, visible=True, info='Generate more frames at a time (larger chunks). Less degradation and better blending, but higher VRAM cost.')

                gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this to a larger value if you encounter OOM. A larger value causes slower speed.")

                mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs.")

            with gr.Accordion("Debug", open=False):
                input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
                prompt_debug = gr.Textbox(label="Prompt Debug", value='')
                total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)

        with gr.Column():
            preview_image = gr.Image(label="Next Latents", height=200, visible=False)
            result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
            progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
            progress_bar = gr.HTML('', elem_classes='no-generating-animation')

    with gr.Row(visible=False):
        gr.Examples(
            examples = [
                [
                    "./img_examples/Example1.mp4",  # input_video
                    "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
                    "",      # n_prompt
                    True,    # randomize_seed
                    42,      # seed
                    1,       # batch
                    640,     # resolution
                    1,       # total_second_length
                    9,       # latent_window_size
                    25,      # steps
                    1.0,     # cfg
                    10.0,    # gs
                    0.0,     # rs
                    6,       # gpu_memory_preservation
                    False,   # use_teacache
                    False,   # no_resize
                    16,      # mp4_crf
                    5,       # num_clean_frames
                    default_vae
                ],
            ],
            run_on_click = True,
            fn = process_video,
            inputs = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch],
            outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
            cache_examples = True,
        )

    gr.Markdown('## Guide')
    gr.Markdown("I discourage using the Text-to-Video feature. Generate an image with Flux and use Image-to-Video instead; you will save time.")

    ips = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
    start_button.click(fn=process_video, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
    end_button.click(fn=end_process)

    # Mirror the debug widgets into the module-level globals that process_video
    # consumes (and clears) on its next run
    def handle_field_debug_change(input_video_debug_data, prompt_debug_data, total_second_length_debug_data):
        global input_video_debug_value, prompt_debug_value, total_second_length_debug_value
        input_video_debug_value = input_video_debug_data
        prompt_debug_value = prompt_debug_data
        total_second_length_debug_value = total_second_length_debug_data
        return []

    input_video_debug.upload(
        fn=handle_field_debug_change,
        inputs=[input_video_debug, prompt_debug, total_second_length_debug],
        outputs=[]
    )

    prompt_debug.change(
        fn=handle_field_debug_change,
        inputs=[input_video_debug, prompt_debug, total_second_length_debug],
        outputs=[]
    )

    total_second_length_debug.change(
        fn=handle_field_debug_change,
        inputs=[input_video_debug, prompt_debug, total_second_length_debug],
        outputs=[]
    )

block.launch(mcp_server=False, ssr_mode=False)