text stringlengths 0 5.54k |
|---|
# NOTE(review): this fragment's preamble is not visible in this chunk — it
# assumes `pipe` (an AnimateDiff pipeline), `torch`, and `export_to_gif`
# were set up earlier. TODO: confirm against the full document.

# Memory savings: decode latents in slices, and keep model components on
# CPU until each is actually needed.
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

output = pipe(
    prompt=(
        "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
        "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
        "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
        "golden hour, coastal landscape, seaside scenery"
    ),
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=25,
    # CPU generator with a fixed seed keeps the sample reproducible.
    generator=torch.Generator("cpu").manual_seed(42),
)

# The pipeline returns a batch of videos; take the first and save it.
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
masterpiece, bestquality, sunset. |
Using FreeInit

FreeInit: Bridging Initialization Gap in Video Diffusion Models by Tianxing Wu, Chenyang Si, Yuming Jiang, Ziqi Huang, Ziwei Liu. FreeInit is an effective method that improves temporal consistency and overall quality of videos generated using video-diffusion-models without any additional training...
import torch  # required below for torch.float16 and torch.Generator
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from diffusers.utils import export_to_gif

# Load the AnimateDiff motion module and attach it to a Stable Diffusion
# v1.5-family base model.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    beta_schedule="linear",
    clip_sample=False,
    timestep_spacing="linspace",
    steps_offset=1,
)

# enable memory savings
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()

# enable FreeInit
# Refer to the enable_free_init documentation for a full list of configurable parameters
pipe.enable_free_init(method="butterworth", use_fast_sampling=True)

# run inference
output = pipe(
    prompt="a panda playing a guitar, on a boat, in the ocean, high quality",
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=20,
    generator=torch.Generator("cpu").manual_seed(666),
)

# disable FreeInit (it adds extra sampling iterations; turn it off when done)
pipe.disable_free_init()

frames = output.frames[0]
export_to_gif(frames, "animation.gif")

FreeInit is not really free - the improved quality comes at the cost of extra computation. It requires sampling a few extra times depending on the num_iters parameter that is set when enabling it. Setting the use_fast_sampling parameter to True can improve the overall performance ...
import torch  # required below for torch.Generator
from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# AnimateLCM: a distilled motion adapter plus an LCM LoRA enable few-step
# sampling (6 steps here) with a low guidance scale.
adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM")
pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="sd15_lora_beta.safetensors", adapter_name="lcm-lora")

# memory savings
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

output = pipe(
    prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution",
    negative_prompt="bad quality, worse quality, low resolution",
    num_frames=16,
    guidance_scale=1.5,
    num_inference_steps=6,
    # fixed CPU seed for reproducibility
    generator=torch.Generator("cpu").manual_seed(0),
)
frames = output.frames[0]
export_to_gif(frames, "animatelcm.gif")

A space rocket, 4K.
AnimateLCM is also compatible with existing Motion LoRAs.

import torch
# NOTE(review): this snippet's leading `import torch` was fused into the
# previous prose line by extraction; the trailing " |" on each line is
# markdown-table residue, and the pipe(...) call is cut off at the end of
# this chunk — the remaining arguments are not visible here.
from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter |
from diffusers.utils import export_to_gif |
# Load the AnimateLCM motion adapter into an SD1.5-family base model and
# switch to the LCM scheduler for few-step sampling.
adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") |
pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter) |
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") |
# Combine the LCM LoRA with a camera motion LoRA (tilt-up), weighted 1.0 / 0.8.
pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="sd15_lora_beta.safetensors", adapter_name="lcm-lora") |
pipe.load_lora_weights("guoyww/animatediff-motion-lora-tilt-up", adapter_name="tilt-up") |
pipe.set_adapters(["lcm-lora", "tilt-up"], [1.0, 0.8]) |
# memory savings
pipe.enable_vae_slicing() |
pipe.enable_model_cpu_offload() |
output = pipe( |
prompt="A space rocket with trails of smoke behind it launching into space from the desert, 4k, high resolution", |
negative_prompt="bad quality, worse quality, low resolution", |
num_frames=16, |
guidance_scale=1.5, |
# (call truncated here in the source chunk)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.