Spaces:
Running
on
Zero
Migrate to ZeroGPU (#32)
Browse files
- Migrate to ZeroGPU (9ac4dbd5b6feda5ab15a3e1e01cd3cd6615c7a62)
- Update requirements.txt (61bef8f4aacbb91d0c30a23bebeef127ca56ee68)
- Update app.py (d9934ce2d14def8c5df228c62ecfb7d701a12012)
Co-authored-by: Apolinário from multimodal AI art <multimodalart@users.noreply.huggingface.co>
- app.py +5 -4
- requirements.txt +0 -4
app.py
CHANGED
|
@@ -17,6 +17,7 @@ import tempfile
|
|
| 17 |
import imageio_ffmpeg
|
| 18 |
import gradio as gr
|
| 19 |
import torch
|
|
|
|
| 20 |
from PIL import Image
|
| 21 |
from diffusers import (
|
| 22 |
CogVideoXPipeline,
|
|
@@ -30,7 +31,7 @@ from datetime import datetime, timedelta
|
|
| 30 |
|
| 31 |
from diffusers.image_processor import VaeImageProcessor
|
| 32 |
from openai import OpenAI
|
| 33 |
-
import moviepy
|
| 34 |
import utils
|
| 35 |
from rife_model import load_rife_model, rife_inference_with_latents
|
| 36 |
from huggingface_hub import hf_hub_download, snapshot_download
|
|
@@ -196,7 +197,7 @@ def convert_prompt(prompt: str, retry_times: int = 3) -> str:
|
|
| 196 |
return response.choices[0].message.content
|
| 197 |
return prompt
|
| 198 |
|
| 199 |
-
|
| 200 |
def infer(
|
| 201 |
prompt: str,
|
| 202 |
image_input: str,
|
|
@@ -281,8 +282,8 @@ def infer(
|
|
| 281 |
|
| 282 |
def convert_to_gif(video_path):
|
| 283 |
clip = mp.VideoFileClip(video_path)
|
| 284 |
-
clip = clip.
|
| 285 |
-
clip = clip.
|
| 286 |
gif_path = video_path.replace(".mp4", ".gif")
|
| 287 |
clip.write_gif(gif_path, fps=8)
|
| 288 |
return gif_path
|
|
|
|
| 17 |
import imageio_ffmpeg
|
| 18 |
import gradio as gr
|
| 19 |
import torch
|
| 20 |
+
import spaces
|
| 21 |
from PIL import Image
|
| 22 |
from diffusers import (
|
| 23 |
CogVideoXPipeline,
|
|
|
|
| 31 |
|
| 32 |
from diffusers.image_processor import VaeImageProcessor
|
| 33 |
from openai import OpenAI
|
| 34 |
+
import moviepy as mp
|
| 35 |
import utils
|
| 36 |
from rife_model import load_rife_model, rife_inference_with_latents
|
| 37 |
from huggingface_hub import hf_hub_download, snapshot_download
|
|
|
|
| 197 |
return response.choices[0].message.content
|
| 198 |
return prompt
|
| 199 |
|
| 200 |
+
@spaces.GPU(duration=300)
|
| 201 |
def infer(
|
| 202 |
prompt: str,
|
| 203 |
image_input: str,
|
|
|
|
| 282 |
|
| 283 |
def convert_to_gif(video_path):
|
| 284 |
clip = mp.VideoFileClip(video_path)
|
| 285 |
+
clip = clip.with_fps(8)
|
| 286 |
+
clip = clip.resized(height=240)
|
| 287 |
gif_path = video_path.replace(".mp4", ".gif")
|
| 288 |
clip.write_gif(gif_path, fps=8)
|
| 289 |
return gif_path
|
requirements.txt
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
spaces>=0.29.3
|
| 2 |
safetensors>=0.4.5
|
| 3 |
spandrel>=0.4.0
|
| 4 |
tqdm>=4.66.5
|
|
@@ -9,9 +8,6 @@ accelerate>=0.34.2
|
|
| 9 |
opencv-python>=4.10.0.84
|
| 10 |
sentencepiece>=0.2.0
|
| 11 |
numpy==1.26.0
|
| 12 |
-
torch==2.2.0
|
| 13 |
-
torchvision
|
| 14 |
-
gradio>=4.44.0
|
| 15 |
imageio>=2.34.2
|
| 16 |
imageio-ffmpeg>=0.5.1
|
| 17 |
openai>=1.45.0
|
|
|
|
|
|
|
| 1 |
safetensors>=0.4.5
|
| 2 |
spandrel>=0.4.0
|
| 3 |
tqdm>=4.66.5
|
|
|
|
| 8 |
opencv-python>=4.10.0.84
|
| 9 |
sentencepiece>=0.2.0
|
| 10 |
numpy==1.26.0
|
|
|
|
|
|
|
|
|
|
| 11 |
imageio>=2.34.2
|
| 12 |
imageio-ffmpeg>=0.5.1
|
| 13 |
openai>=1.45.0
|