# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

from ...pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ...test_pipelines_common import PipelineTesterMixin


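# Disable TF32 matmuls so fp32 results stay bit-comparable to the reference slices across GPU generations.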
torch.backends.cuda.matmul.allow_tf32 = False


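# Fast checks: tiny randomly initialized components let the full pipeline run on CPU in seconds.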
class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
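    # Assemble a miniature UNet/VAE/CLIP stack; reseeding before each module keeps the random weights reproducible.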
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
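    # Build pipeline kwargs with the init image in the requested format ("pt", "np", or "pil").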
    def get_dummy_inputs(self, device, seed=0, input_image_type="pt", output_type="np"):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # MPS does not support device-specific generators, so seed globally instead.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if input_image_type == "pt":
            input_image = image
        elif input_image_type == "np":
            input_image = image.cpu().numpy().transpose(0, 2, 3, 1)
        elif input_image_type == "pil":
            input_image = image.cpu().numpy().transpose(0, 2, 3, 1)
            input_image = VaeImageProcessor.numpy_to_pil(input_image)
        else:
            raise ValueError(f"unsupported input_image_type {input_image_type}.")

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"unsupported output_type {output_type}")

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": output_type,
        }
        return inputs
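    # Smoke test: two PNDM steps on CPU must match a stored output slice to within 1e-3.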
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.image_processor = VaeImageProcessor(vae_scale_factor=sd_pipe.vae_scale_factor, do_normalize=False)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4492, 0.3865, 0.4222, 0.5854, 0.5139, 0.4379, 0.4193, 0.48, 0.4218])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_negative_prompt(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.image_processor = VaeImageProcessor(vae_scale_factor=sd_pipe.vae_scale_factor, do_normalize=False)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4065, 0.3783, 0.4050, 0.5266, 0.4781, 0.4252, 0.4203, 0.4692, 0.4365])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_multiple_init_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.image_processor = VaeImageProcessor(vae_scale_factor=sd_pipe.vae_scale_factor, do_normalize=False)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        inputs["image"] = inputs["image"].repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5144, 0.4447, 0.4735, 0.6676, 0.5526, 0.5454, 0.645, 0.5149, 0.4689])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe.image_processor = VaeImageProcessor(vae_scale_factor=sd_pipe.vae_scale_factor, do_normalize=False)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4367, 0.4986, 0.4372, 0.6706, 0.5665, 0.444, 0.5864, 0.6019, 0.5203])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
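    # The inherited mixin tests below are re-declared only so they can be skipped on MPS.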
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    @skip_mps
    def test_pt_np_pil_outputs_equivalent(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        output_pt = sd_pipe(**self.get_dummy_inputs(device, output_type="pt"))[0]
        output_np = sd_pipe(**self.get_dummy_inputs(device, output_type="np"))[0]
        output_pil = sd_pipe(**self.get_dummy_inputs(device, output_type="pil"))[0]

        assert np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() <= 1e-4
        assert np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() <= 1e-4

    @skip_mps
    def test_image_types_consistent(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        output_pt = sd_pipe(**self.get_dummy_inputs(device, input_image_type="pt"))[0]
        output_np = sd_pipe(**self.get_dummy_inputs(device, input_image_type="np"))[0]
        output_pil = sd_pipe(**self.get_dummy_inputs(device, input_image_type="pil"))[0]

        assert np.abs(output_pt - output_np).max() <= 1e-4
        assert np.abs(output_pil - output_np).max() <= 1e-2


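# Slow checks: full CompVis/stable-diffusion-v1-4 inference on GPU, gated behind the @slow marker.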
@slow
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
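    # Common inputs: the sketch-mountains init image (768x512) plus a seeded generator for stable slices.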
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_img2img_default(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_k_lms(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_img2img_ddim(self):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
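    # Inspect the latents handed to the step callback: shape and values at steps 1 and 2 in fp16.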
    def test_stable_diffusion_img2img_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 2
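    # enable_sequential_cpu_offload moves each submodule to GPU only while it runs, trading speed for memory.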
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
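    # Model-level offloading should use less peak memory than keeping the whole pipeline resident on GPU.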
    def test_stable_diffusion_pipeline_with_model_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)

        # Normal inference
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()

        # With model offloading

        # Reload but don't move to cuda
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            safety_checker=None,
            torch_dtype=torch.float16,
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        _ = pipe(**inputs)
        mem_bytes_offloaded = torch.cuda.max_memory_allocated()

        assert mem_bytes_offloaded < mem_bytes
        for module in pipe.text_encoder, pipe.unet, pipe.vae:
            assert module.device == torch.device("cpu")

    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3


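# Nightly checks: full renders with runwayml/stable-diffusion-v1-5, compared pixel-for-pixel against
# reference arrays stored on the Hub.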
@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/sketch-mountains-input.png"
        )
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 50,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs
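    # Default PNDM scheduler; the tests below swap in DDIM, LMS, and DPM-Solver via from_config.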
    def test_img2img_pndm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_ddim(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_lms(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_dpm(self):
        sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 30
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3