text stringlengths 0 5.54k |
|---|
from io import BytesIO

import PIL.Image
import requests
import torch
from diffusers import StableDiffusionInpaintPipeline
def download_image(url):
    """Fetch an image over HTTP and return it as an RGB PIL image.

    Args:
        url: Direct link to the image file.

    Returns:
        A ``PIL.Image.Image`` converted to RGB mode.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
        requests.Timeout: If the server does not respond within 30 seconds.
    """
    response = requests.get(url, timeout=30)
    # Fail loudly on error statuses instead of handing an HTML error page to PIL.
    response.raise_for_status()
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")
# Example photograph and the matching mask marking the region to repaint.
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

# Image and mask must share the pipeline's working resolution.
target_size = (512, 512)
init_image = download_image(img_url).resize(target_size)
mask_image = download_image(mask_url).resize(target_size)

# Load the SD 1.5 inpainting checkpoint in half precision and move it to GPU.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
result = pipe(prompt=prompt, image=init_image, mask_image=mask_image)
image = result.images[0]
You can also run this example in a Colab notebook.
Text-to-Video Generation with AnimateDiff Overview AnimateDiff was proposed in AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning by Yuwei Guo, Ceyuan Yang, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, and Bo Dai. The abstract of the paper is the following: With the advance of text-to-image models (e.g., Stab...
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter |
from diffusers.utils import export_to_gif |
# Motion module that augments the SD 1.5 UNet with temporal layers.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)

# AnimateDiff pairs the adapter with a finetuned SD 1.5 base checkpoint.
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
)

# DDIM with sample clipping disabled — clipping degrades AnimateDiff output.
pipe.scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)

# Reduce peak VRAM: decode the VAE in slices and offload idle submodules.
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

prompt = (
    "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
    "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
    "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
    "golden hour, coastal landscape, seaside scenery"
)
output = pipe(
    prompt=prompt,
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=25,
    # CPU generator with a fixed seed keeps the output reproducible.
    generator=torch.Generator("cpu").manual_seed(42),
)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
Here are some sample outputs: masterpiece, bestquality, sunset. |
AnimateDiff tends to work better with finetuned Stable Diffusion models. If you plan on using a scheduler that can clip samples, make sure to disable it by setting clip_sample=False in the scheduler as this can also have an adverse effect on generated samples. Additionally, the AnimateDiff checkpoints can be ... |
import requests |
import torch |
from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter |
from diffusers.utils import export_to_gif |
from io import BytesIO |
from PIL import Image |
# Temporal motion module for the SD 1.5 UNet, loaded in half precision.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)

# Finetuned SD 1.5 base checkpoint; build the vid2vid pipeline on the GPU.
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")

# DDIM scheduler with sample clipping disabled, as AnimateDiff requires.
pipe.scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)

# Memory savings: sliced VAE decoding plus CPU offload of idle submodules.
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
# helper function to load videos |
def load_video(file_path: str): |
images = [] |
if file_path.startswith(('http://', 'https://')): |
# If the file_path is a URL |
response = requests.get(file_path) |
response.raise_for_status() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.