import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionXLControlNetInpaintPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


# Run ops deterministically so the hard-coded output slices below are reproducible.
enable_full_determinism()


class ControlNetPipelineSDXLFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
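    """Fast, CPU-sized tests for `StableDiffusionXLControlNetInpaintPipeline` built from tiny dummy components."""
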
    pipeline_class = StableDiffusionXLControlNetInpaintPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"}))
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union(
        {
            "add_text_embeds",
            "add_time_ids",
            "mask",
            "masked_image_latents",
        }
    )

    def get_dummy_components(self):
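        # Every sub-model is seeded and deliberately tiny so the full pipeline runs on CPU in seconds.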
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            # SDXL-specific: linear projections and "text_time" micro-conditioning
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,  # 6 * addition_time_embed_dim (8) + projection_dim (32)
            cross_attention_dim=64,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            conditioning_embedding_out_channels=(16, 32),
            # must match the SDXL-specific UNet configuration above
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # projection_dim is consumed by text_encoder_2 below, which provides SDXL's pooled embeddings
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image_encoder_config = CLIPVisionConfig(
            hidden_size=32,
            image_size=224,
            projection_dim=32,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        image_encoder = CLIPVisionModelWithProjection(image_encoder_config)

        feature_extractor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            "image_encoder": image_encoder,
            "feature_extractor": feature_extractor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0, img_res=64):
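        # MPS does not support device-local torch.Generator objects, so seed globally there.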
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # random input image, an all-ones (fully inpainted) mask, and a 2x-scale control image
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        mask_image = torch.ones_like(image)
        controlnet_embedder_scale_factor = 2
        control_image = floats_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            rng=random.Random(seed),
        ).to(device)
        control_image = control_image.cpu().permute(0, 2, 3, 1)[0]

        # scale the [0, 1] floats to [0, 255] pixel values
        image = 255 * image
        mask_image = 255 * mask_image
        control_image = 255 * control_image

        # convert to PIL images at the requested resolution
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res))
        mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res))
        control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
            "image": init_image,
            "mask_image": mask_image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    def test_dict_tuple_outputs_equivalent(self):
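        # A reference slice is only pinned on CPU, where fully deterministic kernels are available.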
        expected_slice = None
        if torch_device == "cpu":
            expected_slice = np.array([0.5490, 0.5053, 0.4676, 0.5816, 0.5364, 0.4830, 0.5937, 0.5719, 0.4318])
        super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    @require_torch_gpu
    def test_stable_diffusion_xl_offloads(self):
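        # Run the same inputs through three placement strategies (fully on-device, model
        # offload, sequential offload) and require near-identical outputs from each.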
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            pipe.unet.set_default_attn_processor()

            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_stable_diffusion_xl_multi_prompts(self):
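        # prompt_2 / negative_prompt_2 feed SDXL's second text encoder: duplicating the
        # first prompt must reproduce the single-prompt output, while a different one must not.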
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)

        # forward with single prompt
        inputs = self.get_dummy_inputs(torch_device)
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = inputs["prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "different prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are different
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

        # manually set a negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with same negative_prompt duplicated
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = inputs["negative_prompt"]
        output = sd_pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # ensure the results are equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

        # forward with different negative_prompt
        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt"] = "negative prompt"
        inputs["negative_prompt_2"] = "different negative prompt"
        output = sd_pipe(**inputs)
        image_slice_3 = output.images[0, -3:, -3:, -1]

        # ensure the results are different
        assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

    def test_controlnet_sdxl_guess(self):
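        # guess_mode lets the ControlNet infer conditioning from the control image alone;
        # the output is pinned against a reference slice computed on CPU.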
        device = "cpu"

        components = self.get_dummy_components()

        sd_pipe = self.pipeline_class(**components)
        sd_pipe = sd_pipe.to(device)

        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["guess_mode"] = True

        output = sd_pipe(**inputs)
        image_slice = output.images[0, -3:, -3:, -1]
        expected_slice = np.array([0.5490, 0.5053, 0.4676, 0.5816, 0.5364, 0.4830, 0.5937, 0.5719, 0.4318])

        # make sure that it's equal
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4

    # overridden to skip: optional-component save/load is not exercised for this pipeline
    def test_save_load_optional_components(self):
        pass

    def test_float16_inference(self):
        # fp16 rounding differences warrant a looser tolerance here
        super().test_float16_inference(expected_max_diff=5e-1)