Create README.md
Browse files
README.md
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
---
|
| 4 |
+
|
| 5 |
+
# Text-to-Video Diffusion Model
|
| 6 |
+
|
| 7 |
+
This text-to-video diffusion model generates a short sequence of video frames from a natural-language prompt.
|
| 8 |
+
|
| 9 |
+
## Usage
|
| 10 |
+
|
| 11 |
+
```python
|
| 12 |
+
import torch
|
| 13 |
+
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
|
| 14 |
+
from diffusers.utils import export_to_video
|
| 15 |
+
|
| 16 |
+
pipe = DiffusionPipeline.from_pretrained("your-username/text-to-video-diffusion", torch_dtype=torch.float16, variant="fp16")
|
| 17 |
+
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
|
| 18 |
+
pipe.enable_model_cpu_offload()
|
| 19 |
+
|
| 20 |
+
prompt = "Spiderman is surfing"
|
| 21 |
+
video_frames = pipe(prompt, num_inference_steps=25).frames[0]
|
| 22 |
+
video_path = export_to_video(video_frames)
```
|