Commit be28725
1 Parent(s): 3b51f73
rename
app.py CHANGED

@@ -25,6 +25,10 @@ def _load_pipeline_internal(
     """Internal function to load the Sci-Fi pipeline"""
     global pipe

+    # Return early if pipeline is already loaded
+    if pipe is not None:
+        return "Pipeline already loaded!"
+
     dtype = torch.float16 if dtype_str == "float16" else torch.bfloat16

     # Load models
@@ -76,16 +80,6 @@ def _load_pipeline_internal(
     return "Pipeline loaded successfully!"


-@spaces.GPU(duration=1000)
-def load_pipeline(
-    pretrained_model_path="THUDM/CogVideoX-5b",
-    ef_net_path="weights/EF_Net.pth",
-    dtype_str="bfloat16",
-):
-    """Load the Sci-Fi pipeline (Gradio wrapper)"""
-    return _load_pipeline_internal(pretrained_model_path, ef_net_path, dtype_str)
-
-
 @spaces.GPU(duration=1000)
 def generate_inbetweening(
     first_image: Image.Image,
@@ -102,8 +96,13 @@ def generate_inbetweening(
     """Generate frame inbetweening video"""
     global pipe

+    # Load pipeline on first use (lazy loading with GPU access)
     if pipe is None:
-
+        progress(0, desc="Loading pipeline (first run)...")
+        try:
+            _load_pipeline_internal()
+        except Exception as e:
+            return None, f"ERROR: Failed to load pipeline: {str(e)}"

     if first_image is None or last_image is None:
         return None, "Please upload both start and end frames!"
@@ -112,11 +111,11 @@ def generate_inbetweening(
         return None, "Please provide a text prompt!"

     try:
-        progress(0, desc="Starting generation...")
+        progress(0.2, desc="Starting generation...")
         start_time = time.time()

         # Generate video
-        progress(0.
+        progress(0.4, desc="Processing frames...")
         video_frames = pipe(
             first_image=first_image,
             last_image=last_image,
@@ -154,7 +153,7 @@ with gr.Blocks(title="Sci-Fi: Frame Inbetweening") as demo:

     Upload start and end frames to generate smooth inbetweening video.

-    **Note:**
+    **Note:** Pipeline loads on first generation (this may take 1-2 minutes).
     """
     )

@@ -231,44 +230,6 @@ with gr.Blocks(title="Sci-Fi: Frame Inbetweening") as demo:
             outputs=[output_video, status_text],
         )

-    with gr.Tab("Setup"):
-        gr.Markdown(
-            """
-            ## Load Pipeline
-
-            Configure and load the model before generating videos.
-
-            **Default paths:**
-            - Model: `THUDM/CogVideoX-5b` (or your downloaded path)
-            - EF-Net: `weights/EF_Net.pth`
-            """
-        )
-
-        with gr.Row():
-            model_path = gr.Textbox(
-                label="Pretrained Model Path",
-                value="THUDM/CogVideoX-5b",
-                placeholder="Path to CogVideoX model",
-            )
-            ef_net_path = gr.Textbox(
-                label="EF-Net Checkpoint Path",
-                value="weights/EF_Net.pth",
-                placeholder="Path to EF-Net weights",
-            )
-
-        dtype_choice = gr.Radio(
-            choices=["bfloat16", "float16"], value="bfloat16", label="Data Type"
-        )
-
-        load_btn = gr.Button("Load Pipeline", variant="primary")
-        load_status = gr.Textbox(label="Load Status", interactive=False)
-
-        load_btn.click(
-            fn=load_pipeline,
-            inputs=[model_path, ef_net_path, dtype_choice],
-            outputs=load_status,
-        )
-
     with gr.Tab("Examples"):
         gr.Markdown(
             """
@@ -295,13 +256,5 @@ with gr.Blocks(title="Sci-Fi: Frame Inbetweening") as demo:
     )

 if __name__ == "__main__":
-
-    print("Loading pipeline automatically on startup...")
-    try:
-        _load_pipeline_internal()
-        print("Pipeline loaded successfully!")
-    except Exception as e:
-        print(f"Failed to load pipeline on startup: {e}")
-        print("You can manually load it from the Setup tab.")
-
+    print("App starting - pipeline will load on first generation request")
     demo.launch()
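Taken together, the diff removes the manual `load_pipeline` wrapper, the Setup tab, and the eager load at startup, and instead loads the pipeline lazily on the first `generate_inbetweening` call, which already runs under `@spaces.GPU`. Below is a minimal runnable sketch of that lazy-loading pattern; `_expensive_load`, `_ensure_pipe`, and `generate` are hypothetical stand-ins for the real CogVideoX/EF-Net loading code, not names from app.py.

import time

_pipe = None  # module-level cache, like the global `pipe` in app.py

def _expensive_load():
    # Stand-in for loading CogVideoX-5b plus EF-Net weights;
    # here it is just a slow no-op so the sketch runs anywhere.
    time.sleep(0.1)
    return object()

def _ensure_pipe():
    """Load the pipeline on the first call, then reuse the cached instance."""
    global _pipe
    if _pipe is None:  # only the first request pays the load cost
        _pipe = _expensive_load()
    return _pipe

def generate(prompt):
    try:
        _ensure_pipe()  # lazy load inside the request handler
    except Exception as e:
        return None, f"ERROR: Failed to load pipeline: {e}"
    return f"video for {prompt!r}", "Done"

print(generate("a spaceship docking")[1])  # first call: loads, then "Done"
print(generate("same prompt again")[1])    # cached: returns immediately

On ZeroGPU Spaces the GPU is attached only while a `@spaces.GPU`-decorated function runs, which is presumably why the commit moves the load from module import time into the decorated generation handler.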