Spaces:
Paused
Paused
Commit
·
46b0441
1 Parent(s):
2246e78
Fix ZeroGPU worker error by removing manual device management
Browse files
- Remove pipe.to(device) and enable_sequential_cpu_offload() calls
- Let ZeroGPU handle device placement automatically
- Increase duration to 180s for longer generation time
- Add traceback printing for better error debugging
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
app.py
CHANGED
|
@@ -23,7 +23,7 @@ pipe.vae.enable_slicing()
|
|
| 23 |
print("Model loaded successfully!")
|
| 24 |
|
| 25 |
|
| 26 |
-
@spaces.GPU(duration=
|
| 27 |
def generate_interpolation(
|
| 28 |
first_image,
|
| 29 |
last_image,
|
|
@@ -43,11 +43,6 @@ def generate_interpolation(
|
|
| 43 |
return None, "⚠️ Please provide a text prompt describing the motion!"
|
| 44 |
|
| 45 |
try:
|
| 46 |
-
# Move model to CUDA inside the decorated function
|
| 47 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 48 |
-
pipe.to(device)
|
| 49 |
-
pipe.enable_sequential_cpu_offload()
|
| 50 |
-
|
| 51 |
# Convert numpy arrays to PIL Images if needed
|
| 52 |
if not isinstance(first_image, Image.Image):
|
| 53 |
first_image = Image.fromarray(first_image)
|
|
@@ -59,8 +54,8 @@ def generate_interpolation(
|
|
| 59 |
f"Parameters: frames={num_frames}, steps={num_inference_steps}, guidance={guidance_scale}"
|
| 60 |
)
|
| 61 |
|
| 62 |
-
# Generate video
|
| 63 |
-
generator = torch.Generator(device=device).manual_seed(seed)
|
| 64 |
|
| 65 |
video = pipe(
|
| 66 |
prompt=prompt,
|
|
@@ -88,6 +83,8 @@ def generate_interpolation(
|
|
| 88 |
except Exception as e:
|
| 89 |
error_msg = f"❌ Error: {str(e)}"
|
| 90 |
print(error_msg)
|
|
|
|
|
|
|
| 91 |
return None, error_msg
|
| 92 |
|
| 93 |
|
|
|
|
| 23 |
print("Model loaded successfully!")
|
| 24 |
|
| 25 |
|
| 26 |
+
@spaces.GPU(duration=180)
|
| 27 |
def generate_interpolation(
|
| 28 |
first_image,
|
| 29 |
last_image,
|
|
|
|
| 43 |
return None, "⚠️ Please provide a text prompt describing the motion!"
|
| 44 |
|
| 45 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
# Convert numpy arrays to PIL Images if needed
|
| 47 |
if not isinstance(first_image, Image.Image):
|
| 48 |
first_image = Image.fromarray(first_image)
|
|
|
|
| 54 |
f"Parameters: frames={num_frames}, steps={num_inference_steps}, guidance={guidance_scale}"
|
| 55 |
)
|
| 56 |
|
| 57 |
+
# Generate video - ZeroGPU handles device placement automatically
|
| 58 |
+
generator = torch.Generator(device="cuda").manual_seed(seed)
|
| 59 |
|
| 60 |
video = pipe(
|
| 61 |
prompt=prompt,
|
|
|
|
| 83 |
except Exception as e:
|
| 84 |
error_msg = f"❌ Error: {str(e)}"
|
| 85 |
print(error_msg)
|
| 86 |
+
import traceback
|
| 87 |
+
traceback.print_exc()
|
| 88 |
return None, error_msg
|
| 89 |
|
| 90 |
|