Tune-A-Video: One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation
Paper: arXiv:2212.11565
import torch
from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline
from tuneavideo.models.unet import UNet3DConditionModel
from tuneavideo.util import save_videos_grid

# Load the one-shot fine-tuned UNet and plug it into the Stable Diffusion v1-4 base model;
# switch "cuda" to "mps" for Apple devices
unet = UNet3DConditionModel.from_pretrained("Tune-A-Video-library/talking-man", subfolder="unet", torch_dtype=torch.float16).to("cuda")
pipe = TuneAVideoPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", unet=unet, torch_dtype=torch.float16).to("cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# The pipeline returns a batch of video frames rather than a single image
video = pipe(prompt, video_length=8, height=512, width=512, num_inference_steps=50, guidance_scale=7.5).videos
save_videos_grid(video, f"./{prompt}.gif")
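For stronger temporal consistency, the official Tune-A-Video repository additionally conditions sampling on DDIM-inverted latents of the source training video. A minimal sketch reusing the pipeline above, assuming a local fine-tuning output directory with saved inverted latents (the path and filename below are illustrative, following the official repo's layout, and are not shipped with the hub checkpoint):

import torch

# Illustrative path: DDIM-inverted latents saved by the official training script (an assumption)
ddim_inv_latent = torch.load("./outputs/talking-man/inv_latents/ddim_latent-500.pt").to(torch.float16)
# Starting generation from the inverted latents preserves the source video's motion
# while the prompt edits appearance
video = pipe(prompt, latents=ddim_inv_latent, video_length=8, height=512, width=512,
             num_inference_steps=50, guidance_scale=7.5).videos
save_videos_grid(video, "./talking-man-edit.gif")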
Base model: CompVis/stable-diffusion-v1-4