import torch
from diffusers import DiffusionPipeline

# Tiny debug checkpoint — loads fast; weights are random, not trained.
# On Apple-silicon machines pass device_map="mps" instead of "cuda".
pipe = DiffusionPipeline.from_pretrained(
    "Erland/tiny-wan2.1-vsa-t2v-14b-720p-debug",
    dtype=torch.bfloat16,
    device_map="cuda",
)
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
video = pipe(prompt).frames[0]

Tiny Wan2.1 VSA T2V 14B 720P Debug Pipeline
This is a randomly initialized, tiny WanPipeline fixture for FastVideo/Wan2.1-VSA-T2V-14B-720P-Diffusers.
This artifact mirrors the FastVideo Wan2.1-VSA-T2V-14B-720P text-to-video layout as a tiny Diffusers WanPipeline, intended for debugging the FastVideo VideoGenerator and the VIDEO_SPARSE_ATTN load path. The saved transformer weights include FastVideo VSA to_gate_compress tensors, which are not emitted by Diffusers.
It is intended for fast load-path and inference-control debugging only. It is not trained and should not be used for generation quality evaluation.
import os

# Select the sparse-attention backend BEFORE importing fastvideo, in case the
# backend choice is read at import time — TODO confirm against FastVideo docs.
os.environ["FASTVIDEO_ATTENTION_BACKEND"] = "VIDEO_SPARSE_ATTN"

from fastvideo import VideoGenerator

# Tiny randomly-initialized debug checkpoint: output quality is meaningless;
# this only exercises the load path and inference controls.
generator = VideoGenerator.from_pretrained(
    "Erland/tiny-wan2.1-vsa-t2v-14b-720p-debug",
    num_gpus=1,
    pipeline_config={"flow_shift": 5.0},
    VSA_sparsity=0.5,
)
try:
    # Deliberately small settings (64x64, 5 frames, 3 steps) keep the run fast.
    generator.generate_video(
        "debug prompt",
        output_path="my_videos/",
        save_video=True,
        height=64,
        width=64,
        num_frames=5,
        num_inference_steps=3,
        guidance_scale=1.0,
    )
finally:
    # Always release GPU / worker resources, even if generation raises.
    generator.shutdown()
- Downloads last month
- 24