import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetInpaintPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel
from diffusers.utils import load_image
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.torch_utils import randn_tensor

from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    floats_tensor,
    load_numpy,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)
from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
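
# Seed all RNGs and force deterministic kernels so the fast tests produce reproducible outputs.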
enable_full_determinism()


class ControlNetInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset({"control_image"})
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
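
    # All components are tiny, seeded, randomly initialized stand-ins so the full pipeline runs fast in CI.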
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            # 9 input channels: 4 latent + 4 masked-image latent + 1 mask (the standard SD inpainting UNet layout)
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
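        # Device-local generators were not supported on MPS when this test was written,
        # so fall back to seeding the global RNG there.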
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

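        # The control image lives in pixel space: 32 (latent size) x 2 (this tiny VAE's scale factor) = 64.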
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        init_image = init_image.cpu().permute(0, 2, 3, 1)[0]

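        # The init image and mask are derived from the same random tensor; the +4 offset just makes the mask differ.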
        image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
            "image": image,
            "mask_image": mask_image,
            "control_image": control_image,
        }

        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_encode_prompt_works_in_isolation(self):
        extra_required_param_value_dict = {
            "device": torch.device(torch_device).type,
            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
        }
        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)


class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests):
    pipeline_class = StableDiffusionControlNetInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])
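
    # Same suite as the parent class, but with a plain 4-channel text-to-image UNet
    # instead of the 9-channel inpainting variant.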
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components


class MultiControlNetInpaintPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS

    supports_dduf = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

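        # ControlNet zero-initializes its output convolutions, which would make freshly created
        # ControlNets no-ops; re-initialize those layers with nonzero weights so each ControlNet
        # visibly affects the result.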
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

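        # Bundle both ControlNets so the pipeline receives a single MultiControlNetModel.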
        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        init_image = init_image.cpu().permute(0, 2, 3, 1)[0]

        image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
            "image": image,
            "mask_image": mask_image,
            "control_image": control_image,
        }

        return inputs

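    # get_dummy_inputs re-seeds the generator on every call, so any difference between the outputs
    # below is caused by the control-guidance window, not by the RNG.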
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass

    def test_encode_prompt_works_in_isolation(self):
        extra_required_param_value_dict = {
            "device": torch.device(torch_device).type,
            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
        }
        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)


@slow
@require_torch_accelerator
class ControlNetInpaintPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

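    # Full-size end-to-end run with the canny ControlNet on top of the SD 1.5 inpainting checkpoint,
    # checked against a stored reference image.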
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        mask_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/input_bench_mask.png"
        ).resize((512, 512))

        prompt = "pitch black hole"

        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image=image,
            mask_image=mask_image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=3,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2

    def test_inpaint(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint")

        pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(33)

        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
        )
        init_image = init_image.resize((512, 512))

        mask_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
        )
        mask_image = mask_image.resize((512, 512))

        prompt = "a handsome man with ray-ban sunglasses"

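        # The inpaint ControlNet conditions on a control image in which masked pixels are flagged
        # with -1.0 while the remaining pixels keep their normalized values.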
        def make_inpaint_condition(image, image_mask):
            image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
            image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

            # compare height and width (shape[0:1] would only check the height)
            assert image.shape[0:2] == image_mask.shape[0:2], "image and image_mask must have the same image size"
            image[image_mask > 0.5] = -1.0  # flag masked pixels
            image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
            image = torch.from_numpy(image)
            return image

        control_image = make_inpaint_condition(init_image, mask_image)

        output = pipe(
            prompt,
            image=init_image,
            mask_image=mask_image,
            control_image=control_image,
            guidance_scale=9.0,
            eta=1.0,
            generator=generator,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy"
        )

        assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2