import sys
import unittest

import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKLWan,
    FlowMatchEulerDiscreteScheduler,
    WanPipeline,
    WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
    floats_tensor,
    require_peft_backend,
    skip_mps,
)


sys.path.append(".")

from utils import PeftLoraLoaderMixinTests


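# LoRA tests for the Wan video pipeline, exercised through the shared PeftLoraLoaderMixinTests suite.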
@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = WanPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

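    # Tiny transformer and VAE configurations keep the test models small and cheap to construct.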
    transformer_kwargs = {
        "patch_size": (1, 2, 2),
        "num_attention_heads": 2,
        "attention_head_dim": 12,
        "in_channels": 16,
        "out_channels": 16,
        "text_dim": 32,
        "freq_dim": 256,
        "ffn_dim": 32,
        "num_layers": 2,
        "cross_attn_norm": True,
        "qk_norm": "rms_norm_across_heads",
        "rope_max_seq_len": 32,
    }
    transformer_cls = WanTransformer3DModel
    vae_kwargs = {
        "base_dim": 3,
        "z_dim": 16,
        "dim_mult": [1, 1, 1, 1],
        "num_res_blocks": 1,
        "temperal_downsample": [False, True, True],
    }
    vae_cls = AutoencoderKLWan
    has_two_text_encoders = True
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    text_encoder_target_modules = ["q", "k", "v", "o"]

    @property
    def output_shape(self):
        return (1, 9, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        num_frames = 9
        num_latent_frames = 3
        sizes = (4, 4)

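        # A fixed seed keeps the dummy latents and prompt token ids reproducible across runs.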
        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "num_frames": num_frames,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

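    # The fused/unfused LoRA comparisons need a slightly relaxed tolerance for Wan.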
    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

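    # Per-block LoRA scaling and padding-mode overrides are not supported for Wan, so these mixin tests are skipped.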
@unittest.skip("Not supported in Wan.") |
|
|
def test_simple_inference_with_text_denoiser_block_scale(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Not supported in Wan.") |
|
|
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Not supported in Wan.") |
|
|
def test_modify_padding_mode(self): |
|
|
pass |
|
|
|
|
|
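    # Wan does not support text encoder LoRA, so the text-encoder-only mixin tests are skipped.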
@unittest.skip("Text encoder LoRA is not supported in Wan.") |
|
|
def test_simple_inference_with_partial_text_lora(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Text encoder LoRA is not supported in Wan.") |
|
|
def test_simple_inference_with_text_lora(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Text encoder LoRA is not supported in Wan.") |
|
|
def test_simple_inference_with_text_lora_and_scale(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Text encoder LoRA is not supported in Wan.") |
|
|
def test_simple_inference_with_text_lora_fused(self): |
|
|
pass |
|
|
|
|
|
@unittest.skip("Text encoder LoRA is not supported in Wan.") |
|
|
def test_simple_inference_with_text_lora_save_load(self): |
|
|
pass |
|
|
|