Hugging Face Spaces — commit "Update app.py" (Space status: build error).
Browse files
File changed: app.py (CHANGED)
|
@@ -3,7 +3,7 @@ from diffusers import StableVideoDiffusionPipeline, EulerDiscreteScheduler
|
|
| 3 |
import torch
|
| 4 |
from PIL import Image
|
| 5 |
import tempfile
|
| 6 |
-
|
| 7 |
# Load the Stable Video Diffusion model
|
| 8 |
model_id = "stabilityai/stable-video-diffusion-img2vid-xt"
|
| 9 |
try:
|
|
@@ -13,6 +13,7 @@ try:
|
|
| 13 |
except Exception as e:
|
| 14 |
raise RuntimeError(f"Failed to load the model: {e}")
|
| 15 |
|
|
|
|
| 16 |
def generate_video(image, num_frames=25, height=576, width=1024):
|
| 17 |
try:
|
| 18 |
# Convert the image to a format suitable for the pipeline
|
|
|
|
| 3 |
import torch
|
| 4 |
from PIL import Image
|
| 5 |
import tempfile
|
| 6 |
+
import spaces
|
| 7 |
# Load the Stable Video Diffusion model
|
| 8 |
model_id = "stabilityai/stable-video-diffusion-img2vid-xt"
|
| 9 |
try:
|
|
|
|
| 13 |
except Exception as e:
|
| 14 |
raise RuntimeError(f"Failed to load the model: {e}")
|
| 15 |
|
| 16 |
+
@spaces.GPU
|
| 17 |
def generate_video(image, num_frames=25, height=576, width=1024):
|
| 18 |
try:
|
| 19 |
# Convert the image to a format suitable for the pipeline
|