import os
import time

import gradio as gr
import spaces
import torch
from diffusers import AutoencoderKLCogVideoX, CogVideoXDDIMScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import T5EncoderModel, T5Tokenizer

from cogvideo_transformer import CustomCogVideoXTransformer3DModel
from EF_Net import EF_Net
from Sci_Fi_inbetweening_pipeline import CogVideoXEFNetInbetweeningPipeline
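# cogvideo_transformer, EF_Net, and Sci_Fi_inbetweening_pipeline are local
# modules expected to ship alongside this script in the Space repository.
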
# Global variables for the pipeline
pipe = None
device = "cuda" if torch.cuda.is_available() else "cpu"

def _load_pipeline_internal(
    pretrained_model_path="LiuhanChen/Sci-Fi",
    ef_net_path="weights/EF_Net.pth",
    dtype_str="bfloat16",
):
    """Internal function to load the Sci-Fi pipeline."""
    global pipe

    # Return early if the pipeline singleton is already loaded
    if pipe is not None:
        return "Pipeline already loaded!"

    dtype = torch.float16 if dtype_str == "float16" else torch.bfloat16

    # Download EF-Net weights from the Hub if they are not present locally
    if not os.path.exists(ef_net_path):
        print("Downloading EF-Net weights from Hugging Face...")
        os.makedirs("weights", exist_ok=True)
        # With local_dir set, hf_hub_download returns the local file path
        # (weights/EF_Net/EF_Net.pth), so no manual path rewriting is needed
        ef_net_path = hf_hub_download(
            repo_id="LiuhanChen/Sci-Fi",
            subfolder="EF_Net",
            filename="EF_Net.pth",
            local_dir="weights",
        )
        print(f"EF-Net weights downloaded to {ef_net_path}")

    # Load the CogVideoX-5b-I2V components from Hugging Face
    tokenizer = T5Tokenizer.from_pretrained(
        pretrained_model_path, subfolder="CogVideoX-5b-I2V/tokenizer"
    )
    text_encoder = T5EncoderModel.from_pretrained(
        pretrained_model_path, subfolder="CogVideoX-5b-I2V/text_encoder"
    )
    transformer = CustomCogVideoXTransformer3DModel.from_pretrained(
        pretrained_model_path, subfolder="CogVideoX-5b-I2V/transformer"
    )
    vae = AutoencoderKLCogVideoX.from_pretrained(
        pretrained_model_path, subfolder="CogVideoX-5b-I2V/vae"
    )
    scheduler = CogVideoXDDIMScheduler.from_pretrained(
        pretrained_model_path, subfolder="CogVideoX-5b-I2V/scheduler"
    )
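    # Assumption: components load in full precision here and are cast to
    # `dtype` below via pipe.to(); passing torch_dtype=dtype to the model
    # from_pretrained calls above would reduce peak CPU memory while loading.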

    # Load EF-Net and freeze it for inference
    EF_Net_model = (
        EF_Net(num_layers=4, downscale_coef=8, in_channels=2, num_attention_heads=48)
        .requires_grad_(False)
        .eval()
    )
    ckpt = torch.load(ef_net_path, map_location="cpu", weights_only=False)
    m, u = EF_Net_model.load_state_dict(ckpt["state_dict"], strict=False)
    print(f"[EF-Net loaded] Missing: {len(m)} | Unexpected: {len(u)}")

    # Assemble the pipeline and switch the scheduler to trailing timesteps
    pipe = CogVideoXEFNetInbetweeningPipeline(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        transformer=transformer,
        vae=vae,
        EF_Net_model=EF_Net_model,
        scheduler=scheduler,
    )
    pipe.scheduler = CogVideoXDDIMScheduler.from_config(
        pipe.scheduler.config, timestep_spacing="trailing"
    )
    pipe.to(device)
    pipe = pipe.to(dtype=dtype)
    # Slicing and tiling reduce VAE memory usage during decoding
    pipe.vae.enable_slicing()
    pipe.vae.enable_tiling()
    return "Pipeline loaded successfully!"

@spaces.GPU(duration=500)
def generate_inbetweening(
    first_image: Image.Image,
    last_image: Image.Image,
    prompt: str,
    num_frames: int = 49,
    guidance_scale: float = 6.0,
    ef_net_weights: float = 1.0,
    ef_net_guidance_start: float = 0.0,
    ef_net_guidance_end: float = 1.0,
    seed: int = 42,
    progress=gr.Progress(),
):
"""Generate frame inbetweening video"""
global pipe
# Load pipeline on first use (lazy loading with GPU access)
if pipe is None:
progress(0, desc="Loading pipeline (first run)...")
try:
_load_pipeline_internal()
except Exception as e:
return None, f"ERROR: Failed to load pipeline: {str(e)}"
if first_image is None or last_image is None:
return None, "Please upload both start and end frames!"
if not prompt.strip():
return None, "Please provide a text prompt!"

    try:
        progress(0.2, desc="Starting generation...")
        start_time = time.time()

        # Run the inbetweening pipeline
        progress(0.4, desc="Processing frames...")
        video_frames = pipe(
            first_image=first_image,
            last_image=last_image,
            prompt=prompt,
            num_frames=num_frames,
            use_dynamic_cfg=False,
            guidance_scale=guidance_scale,
            generator=torch.Generator(device=device).manual_seed(seed),
            EF_Net_weights=ef_net_weights,
            EF_Net_guidance_start=ef_net_guidance_start,
            EF_Net_guidance_end=ef_net_guidance_end,
        ).frames[0]

        progress(0.9, desc="Exporting video...")
        output_path = f"output_{int(time.time())}.mp4"
        export_to_video(video_frames, output_path, fps=7)

        elapsed_time = time.time() - start_time
        status_msg = f"Video generated successfully in {elapsed_time:.2f}s"
        progress(1.0, desc="Done!")
        return output_path, status_msg
    except Exception as e:
        return None, f"Error: {str(e)}"

# Create Gradio interface
with gr.Blocks(title="Sci-Fi: Frame Inbetweening") as demo:
    gr.Markdown(
        """
        # Sci-Fi: Symmetric Constraint for Frame Inbetweening

        Upload start and end frames to generate a smooth inbetweening video.

        **Note:** The pipeline loads on the first generation request (this may take 1-2 minutes).
        """
    )
    with gr.Tab("Generate"):
        with gr.Row():
            with gr.Column():
                first_image = gr.Image(label="Start Frame", type="pil")
                last_image = gr.Image(label="End Frame", type="pil")
            with gr.Column():
                prompt = gr.Textbox(
                    label="Prompt",
                    placeholder="Describe the motion or content...",
                    lines=3,
                )
                with gr.Accordion("Advanced Settings", open=False):
                    # Stepping by 12 from a minimum of 13 keeps num_frames at
                    # 13/25/37/49, matching CogVideoX's requirement that
                    # (num_frames - 1) be divisible by 4
                    num_frames = gr.Slider(
                        minimum=13,
                        maximum=49,
                        value=49,
                        step=12,
                        label="Number of Frames",
                    )
                    guidance_scale = gr.Slider(
                        minimum=1.0,
                        maximum=15.0,
                        value=6.0,
                        step=0.5,
                        label="Guidance Scale",
                    )
                    ef_net_weights = gr.Slider(
                        minimum=0.0,
                        maximum=2.0,
                        value=1.0,
                        step=0.1,
                        label="EF-Net Weights",
                    )
                    ef_net_guidance_start = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.0,
                        step=0.1,
                        label="EF-Net Guidance Start",
                    )
                    ef_net_guidance_end = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=1.0,
                        step=0.1,
                        label="EF-Net Guidance End",
                    )
                    seed = gr.Number(label="Seed", value=42, precision=0)
                generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

        with gr.Row():
            output_video = gr.Video(label="Generated Video")
            status_text = gr.Textbox(label="Status", lines=2)
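
        # The `inputs` order below must match generate_inbetweening's positional
        # parameters; the `progress` argument is injected by Gradio itself.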
        generate_btn.click(
            fn=generate_inbetweening,
            inputs=[
                first_image,
                last_image,
                prompt,
                num_frames,
                guidance_scale,
                ef_net_weights,
                ef_net_guidance_start,
                ef_net_guidance_end,
                seed,
            ],
            outputs=[output_video, status_text],
        )
with gr.Tab("Examples"):
gr.Markdown(
"""
## Example Inputs
Try these example frame pairs from the `example_input_pairs/` folder.
"""
)
gr.Examples(
examples=[
[
"example_input_pairs/input_pair1/start.jpg",
"example_input_pairs/input_pair1/end.jpg",
"A smooth transition between frames",
],
[
"example_input_pairs/input_pair2/start.jpg",
"example_input_pairs/input_pair2/end.jpg",
"Natural motion interpolation",
],
],
inputs=[first_image, last_image, prompt],
)
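
    # The example paths above are assumed to exist in the Space repository;
    # gr.Examples cannot render entries whose files are missing.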

if __name__ == "__main__":
    print("App starting - pipeline will load on first generation request")
    demo.launch()
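    # On Spaces, demo.queue().launch() is often preferred so long generations
    # are queued rather than timing out; plain launch() is kept here to
    # preserve the original behavior.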