# Example taken from the Hugging Face repo 'zerogpu-aoti/Wan2'
# (file: WanTransformerBlock.fp8da/example.py), commit d9a1e9d.
"""Example: run Wan2.2 with FP8 quantized transformer weights and pre-compiled
(ahead-of-time) transformer blocks loaded from the Hub.

Steps:
  1. Load the Wan2.2 diffusion pipeline.
  2. Quantize the transformer to FP8 (dynamic activations, FP8 weights) with torchao.
  3. Attach AoT-compiled blocks from 'zerogpu-aoti/Wan2' (variant 'fp8da', i.e.
     fp8 dynamic-activation — must match the quantization scheme applied above).
"""
import spaces
import torch
from diffusers import DiffusionPipeline
from torchao import quantize_
# Fix: the original called Float8Config(), a name that was never imported and
# does not exist in torchao -> NameError at runtime. The config matching the
# 'fp8da' variant is Float8DynamicActivationFloat8WeightConfig.
from torchao.quantization import Float8DynamicActivationFloat8WeightConfig

# NOTE(review): 'Wan2.2' is not a full hub repo id (expected 'org/name', e.g.
# something under the Wan-AI organization) — confirm the intended repo.
pipeline = DiffusionPipeline.from_pretrained('Wan2.2')

# Quantize transformer weights in place to FP8 with dynamic activation scaling.
quantize_(pipeline.transformer, Float8DynamicActivationFloat8WeightConfig())

# Load the ahead-of-time compiled blocks built for the fp8da-quantized model.
spaces.aoti_blocks_load(pipeline.transformer, 'zerogpu-aoti/Wan2', variant='fp8da')