repo_id
stringlengths
15
89
file_path
stringlengths
27
180
content
stringlengths
1
2.23M
__index_level_0__
int64
0
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_outputs.py
import pickle as pkl import unittest from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from diffusers.utils.outputs import BaseOutput from diffusers.utils.testing_utils import require_torch @dataclass class CustomOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] class ConfigTester(unittest.TestCase): def test_outputs_single_attribute(self): outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_dict_init(self): # test output reinitialization with a `dict` for compatibility with `accelerate` outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_serialization(self): outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) serialized = pkl.dumps(outputs_orig) outputs_copy = pkl.loads(serialized) # Check original and copy are equal assert dir(outputs_orig) == dir(outputs_copy) assert dict(outputs_orig) == dict(outputs_copy) assert vars(outputs_orig) == vars(outputs_copy) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch import torch.utils._pytree data = np.random.rand(1, 3, 4, 4) x = CustomOutput(images=data) self.assertFalse(torch.utils._pytree._is_leaf(x)) expected_flat_outs = [data] expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = torch.utils._pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_check_dummies.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers") class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): simple_backend = find_backend(" if not is_torch_available():") self.assertEqual(simple_backend, "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(double_backend, "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(triple_backend, "torch_and_transformers_and_onnx") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("torch_and_transformers", objects) self.assertIn("flax_and_transformers", objects) self.assertIn("torch_and_transformers_and_onnx", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel", objects["torch"]) self.assertIn("FlaxUNet2DConditionModel", objects["flax"]) self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ dummy_class = create_dummy_object("FakeClass", "'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_config.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from diffusers import ( DDIMScheduler, DDPMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, PNDMScheduler, logging, ) from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.utils.testing_utils import CaptureLogger class SampleObject(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], ): pass class SampleObject2(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", f=[1, 3], ): pass class SampleObject3(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], f=[1, 3], ): pass class SampleObject4(ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 5], f=[5, 4], ): pass class ConfigTester(unittest.TestCase): def test_load_not_from_mixin(self): with self.assertRaises(ValueError): ConfigMixin.load_config("dummy_path") def test_register_to_config(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # init ignore private arguments obj = SampleObject(_name_or_path="lalala") config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can override default obj = SampleObject(c=6) config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] # can use positional arguments. obj = SampleObject(1, c=6) config = obj.config assert config["a"] == 1 assert config["b"] == 5 assert config["c"] == 6 assert config["d"] == "for diffusion" assert config["e"] == [1, 3] def test_save_load(self): obj = SampleObject() config = obj.config assert config["a"] == 2 assert config["b"] == 5 assert config["c"] == (2, 5) assert config["d"] == "for diffusion" assert config["e"] == [1, 3] with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname)) new_config = new_obj.config # unfreeze configs config = dict(config) new_config = dict(new_config) assert config.pop("c") == (2, 5) # instantiated as tuple assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json config.pop("_use_default_values") assert config == new_config def test_load_ddim_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddim = DDIMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert ddim.__class__ == DDIMScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_euler_ancestral_from_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: euler = EulerAncestralDiscreteScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert euler.__class__ == EulerAncestralDiscreteScheduler # no warning should be thrown assert cap_logger.out == "" def test_load_pndm(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pndm = PNDMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert pndm.__class__ == PNDMScheduler # no warning should be thrown assert cap_logger.out == "" def test_overwrite_config_on_load(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: ddpm = DDPMScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler", prediction_type="sample", beta_end=8, ) with CaptureLogger(logger) as cap_logger_2: ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88) assert ddpm.__class__ == DDPMScheduler assert ddpm.config.prediction_type == "sample" assert ddpm.config.beta_end == 8 assert ddpm_2.config.beta_start == 88 # no warning should be thrown assert cap_logger.out == "" assert cap_logger_2.out == "" def test_load_dpmsolver(self): logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: dpm = DPMSolverMultistepScheduler.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" ) assert dpm.__class__ == DPMSolverMultistepScheduler # no warning should be thrown assert cap_logger.out == "" def test_use_default_values(self): # let's first save a config that should be in the form # a=2, # b=5, # c=(2, 5), # d="for diffusion", # e=[1, 3], config = SampleObject() config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")} # make sure that default config has all keys in `_use_default_values` assert set(config_dict.keys()) == set(config.config._use_default_values) with tempfile.TemporaryDirectory() as tmpdirname: config.save_config(tmpdirname) # now loading it with SampleObject2 should put f into `_use_default_values` config = SampleObject2.from_config(tmpdirname) assert "f" in config._use_default_values assert config.f == [1, 3] # now loading the config, should **NOT** use [1, 3] for `f`, but the default [1, 4] value # **BECAUSE** it is part of `config._use_default_values` new_config = SampleObject4.from_config(config.config) assert new_config.f == [5, 4] config.config._use_default_values.pop() new_config_2 = SampleObject4.from_config(config.config) assert new_config_2.f == [1, 3] # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5] assert new_config_2.e == [1, 3]
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from distutils.util import strtobool import pytest from diffusers import __version__ from diffusers.utils import deprecate # Used to test the hub USER = "__DUMMY_TRANSFORMERS_USER__" ENDPOINT_STAGING = "https://hub-ci.huggingface.co" # Not critical, only usable on the sandboxed CI instance. TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL" class DeprecateTester(unittest.TestCase): higher_version = ".".join([str(int(__version__.split(".")[0]) + 1)] + __version__.split(".")[1:]) lower_version = "0.0.1" def test_deprecate_function_arg(self): kwargs = {"deprecated_arg": 4} with self.assertWarns(FutureWarning) as warning: output = deprecate("deprecated_arg", self.higher_version, "message", take_from=kwargs) assert output == 4 assert ( str(warning.warning) == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}." " message" ) def test_deprecate_function_arg_tuple(self): kwargs = {"deprecated_arg": 4} with self.assertWarns(FutureWarning) as warning: output = deprecate(("deprecated_arg", self.higher_version, "message"), take_from=kwargs) assert output == 4 assert ( str(warning.warning) == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}." " message" ) def test_deprecate_function_args(self): kwargs = {"deprecated_arg_1": 4, "deprecated_arg_2": 8} with self.assertWarns(FutureWarning) as warning: output_1, output_2 = deprecate( ("deprecated_arg_1", self.higher_version, "Hey"), ("deprecated_arg_2", self.higher_version, "Hey"), take_from=kwargs, ) assert output_1 == 4 assert output_2 == 8 assert ( str(warning.warnings[0].message) == "The `deprecated_arg_1` argument is deprecated and will be removed in version" f" {self.higher_version}. Hey" ) assert ( str(warning.warnings[1].message) == "The `deprecated_arg_2` argument is deprecated and will be removed in version" f" {self.higher_version}. Hey" ) def test_deprecate_function_incorrect_arg(self): kwargs = {"deprecated_arg": 4} with self.assertRaises(TypeError) as error: deprecate(("wrong_arg", self.higher_version, "message"), take_from=kwargs) assert "test_deprecate_function_incorrect_arg in" in str(error.exception) assert "line" in str(error.exception) assert "got an unexpected keyword argument `deprecated_arg`" in str(error.exception) def test_deprecate_arg_no_kwarg(self): with self.assertWarns(FutureWarning) as warning: deprecate(("deprecated_arg", self.higher_version, "message")) assert ( str(warning.warning) == f"`deprecated_arg` is deprecated and will be removed in version {self.higher_version}. message" ) def test_deprecate_args_no_kwarg(self): with self.assertWarns(FutureWarning) as warning: deprecate( ("deprecated_arg_1", self.higher_version, "Hey"), ("deprecated_arg_2", self.higher_version, "Hey"), ) assert ( str(warning.warnings[0].message) == f"`deprecated_arg_1` is deprecated and will be removed in version {self.higher_version}. Hey" ) assert ( str(warning.warnings[1].message) == f"`deprecated_arg_2` is deprecated and will be removed in version {self.higher_version}. Hey" ) def test_deprecate_class_obj(self): class Args: arg = 5 with self.assertWarns(FutureWarning) as warning: arg = deprecate(("arg", self.higher_version, "message"), take_from=Args()) assert arg == 5 assert ( str(warning.warning) == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" ) def test_deprecate_class_objs(self): class Args: arg = 5 foo = 7 with self.assertWarns(FutureWarning) as warning: arg_1, arg_2 = deprecate( ("arg", self.higher_version, "message"), ("foo", self.higher_version, "message"), ("does not exist", self.higher_version, "message"), take_from=Args(), ) assert arg_1 == 5 assert arg_2 == 7 assert ( str(warning.warning) == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" ) assert ( str(warning.warnings[0].message) == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" ) assert ( str(warning.warnings[1].message) == f"The `foo` attribute is deprecated and will be removed in version {self.higher_version}. message" ) def test_deprecate_incorrect_version(self): kwargs = {"deprecated_arg": 4} with self.assertRaises(ValueError) as error: deprecate(("wrong_arg", self.lower_version, "message"), take_from=kwargs) assert ( str(error.exception) == "The deprecation tuple ('wrong_arg', '0.0.1', 'message') should be removed since diffusers' version" f" {__version__} is >= {self.lower_version}" ) def test_deprecate_incorrect_no_standard_warn(self): with self.assertWarns(FutureWarning) as warning: deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False) assert str(warning.warning) == "This message is better!!!" def test_deprecate_stacklevel(self): with self.assertWarns(FutureWarning) as warning: deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False) assert str(warning.warning) == "This message is better!!!" assert "diffusers/tests/others/test_utils.py" in warning.filename def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False) def is_staging_test(test_case): """ Decorator marking a test as a staging test. Those tests will run using the staging environment of huggingface.co instead of the real model hub. """ if not _run_staging: return unittest.skip("test is staging test")(test_case) else: return pytest.mark.is_staging_test()(test_case)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_image_processor.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import PIL.Image import torch from diffusers.image_processor import VaeImageProcessor class ImageProcessorTest(unittest.TestCase): @property def dummy_sample(self): batch_size = 1 num_channels = 3 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample @property def dummy_mask(self): batch_size = 1 num_channels = 1 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample def to_np(self, image): if isinstance(image[0], PIL.Image.Image): return np.stack([np.array(i) for i in image], axis=0) elif isinstance(image, torch.Tensor): return image.cpu().numpy().transpose(0, 2, 3, 1) return image def test_vae_image_processor_pt(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_pt = self.dummy_sample input_np = self.to_np(input_pt) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess( image_processor.preprocess(input_pt), output_type=output_type, ) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np assert ( np.abs(in_np - out_np).max() < 1e-6 ), f"decoded output does not match input for output_type {output_type}" def test_vae_image_processor_np(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess(image_processor.preprocess(input_np), output_type=output_type) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np assert ( np.abs(in_np - out_np).max() < 1e-6 ), f"decoded output does not match input for output_type {output_type}" def test_vae_image_processor_pil(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) input_pil = image_processor.numpy_to_pil(input_np) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess(image_processor.preprocess(input_pil), output_type=output_type) for i, o in zip(input_pil, out): in_np = np.array(i) out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round() assert ( np.abs(in_np - out_np).max() < 1e-6 ), f"decoded output does not match input for output_type {output_type}" def test_preprocess_input_3d(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) input_pt_4d = self.dummy_sample input_pt_3d = input_pt_4d.squeeze(0) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) input_np_4d = self.to_np(self.dummy_sample) input_np_3d = input_np_4d.squeeze(0) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_3d = image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) assert np.abs(out_pt_4d - out_pt_3d).max() < 1e-6 assert np.abs(out_np_4d - out_np_3d).max() < 1e-6 def test_preprocess_input_list(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) input_pt_4d = self.dummy_sample input_pt_list = list(input_pt_4d) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_list = image_processor.postprocess( image_processor.preprocess(input_pt_list), output_type="np", ) input_np_4d = self.to_np(self.dummy_sample) input_np_list = list(input_np_4d) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_list = image_processor.postprocess( image_processor.preprocess(input_np_list), output_type="np", ) assert np.abs(out_pt_4d - out_pt_list).max() < 1e-6 assert np.abs(out_np_4d - out_np_list).max() < 1e-6 def test_preprocess_input_mask_3d(self): image_processor = VaeImageProcessor( do_resize=False, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) input_pt_4d = self.dummy_mask input_pt_3d = input_pt_4d.squeeze(0) input_pt_2d = input_pt_3d.squeeze(0) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) out_pt_2d = image_processor.postprocess( image_processor.preprocess(input_pt_2d), output_type="np", ) input_np_4d = self.to_np(self.dummy_mask) input_np_3d = input_np_4d.squeeze(0) input_np_3d_1 = input_np_4d.squeeze(-1) input_np_2d = input_np_3d.squeeze(-1) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_3d = image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) out_np_3d_1 = image_processor.postprocess( image_processor.preprocess(input_np_3d_1), output_type="np", ) out_np_2d = image_processor.postprocess( image_processor.preprocess(input_np_2d), output_type="np", ) assert np.abs(out_pt_4d - out_pt_3d).max() == 0 assert np.abs(out_pt_4d - out_pt_2d).max() == 0 assert np.abs(out_np_4d - out_np_3d).max() == 0 assert np.abs(out_np_4d - out_np_3d_1).max() == 0 assert np.abs(out_np_4d - out_np_2d).max() == 0 def test_preprocess_input_mask_list(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True) input_pt_4d = self.dummy_mask input_pt_3d = input_pt_4d.squeeze(0) input_pt_2d = input_pt_3d.squeeze(0) inputs_pt = [input_pt_4d, input_pt_3d, input_pt_2d] inputs_pt_list = [[input_pt] for input_pt in inputs_pt] for input_pt, input_pt_list in zip(inputs_pt, inputs_pt_list): out_pt = image_processor.postprocess( image_processor.preprocess(input_pt), output_type="np", ) out_pt_list = image_processor.postprocess( image_processor.preprocess(input_pt_list), output_type="np", ) assert np.abs(out_pt - out_pt_list).max() < 1e-6 input_np_4d = self.to_np(self.dummy_mask) input_np_3d = input_np_4d.squeeze(0) input_np_2d = input_np_3d.squeeze(-1) inputs_np = [input_np_4d, input_np_3d, input_np_2d] inputs_np_list = [[input_np] for input_np in inputs_np] for input_np, input_np_list in zip(inputs_np, inputs_np_list): out_np = image_processor.postprocess( image_processor.preprocess(input_np), output_type="np", ) out_np_list = image_processor.postprocess( image_processor.preprocess(input_np_list), output_type="np", ) assert np.abs(out_np - out_np_list).max() < 1e-6 def test_preprocess_input_mask_3d_batch(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True) # create a dummy mask input with batch_size 2 dummy_mask_batch = torch.cat([self.dummy_mask] * 2, axis=0) # squeeze out the channel dimension input_pt_3d = dummy_mask_batch.squeeze(1) input_np_3d = self.to_np(dummy_mask_batch).squeeze(-1) input_pt_3d_list = list(input_pt_3d) input_np_3d_list = list(input_np_3d) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) out_pt_3d_list = image_processor.postprocess( image_processor.preprocess(input_pt_3d_list), output_type="np", ) assert np.abs(out_pt_3d - out_pt_3d_list).max() < 1e-6 out_np_3d = image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) out_np_3d_list = image_processor.postprocess( image_processor.preprocess(input_np_3d_list), output_type="np", ) assert np.abs(out_np_3d - out_np_3d_list).max() < 1e-6 def test_vae_image_processor_resize_pt(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) input_pt = self.dummy_sample b, c, h, w = input_pt.shape scale = 2 out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale) exp_pt_shape = (b, c, h // scale, w // scale) assert ( out_pt.shape == exp_pt_shape ), f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." def test_vae_image_processor_resize_np(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) input_pt = self.dummy_sample b, c, h, w = input_pt.shape scale = 2 input_np = self.to_np(input_pt) out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale) exp_np_shape = (b, h // scale, w // scale, c) assert ( out_np.shape == exp_np_shape ), f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'."
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_hub_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from pathlib import Path from tempfile import TemporaryDirectory from unittest.mock import Mock, patch import diffusers.utils.hub_utils class CreateModelCardTest(unittest.TestCase): @patch("diffusers.utils.hub_utils.get_full_repo_name") def test_create_model_card(self, repo_name_mock: Mock) -> None: repo_name_mock.return_value = "full_repo_name" with TemporaryDirectory() as tmpdir: # Dummy args values args = Mock() args.output_dir = tmpdir args.local_rank = 0 args.hub_token = "hub_token" args.dataset_name = "dataset_name" args.learning_rate = 0.01 args.train_batch_size = 100000 args.eval_batch_size = 10000 args.gradient_accumulation_steps = 0.01 args.adam_beta1 = 0.02 args.adam_beta2 = 0.03 args.adam_weight_decay = 0.0005 args.adam_epsilon = 0.000001 args.lr_scheduler = 1 args.lr_warmup_steps = 10 args.ema_inv_gamma = 0.001 args.ema_power = 0.1 args.ema_max_decay = 0.2 args.mixed_precision = True # Model card mush be rendered and saved diffusers.utils.hub_utils.create_model_card(args, model_name="model_name") self.assertTrue((Path(tmpdir) / "README.md").is_file())
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_training.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow torch.backends.cuda.matmul.allow_tf32 = False class TrainingTests(unittest.TestCase): def get_model_optimizer(self, resolution=32): set_seed(0) model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3) optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) return model, optimizer @slow def test_training_step_equality(self): device = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable ddpm_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, ) ddim_scheduler = DDIMScheduler( num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True, ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)] noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)] timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)] # train with a DDPM scheduler model, optimizer = self.get_model_optimizer(resolution=32) model.train().to(device) for i in range(4): optimizer.zero_grad() ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM model, optimizer = self.get_model_optimizer(resolution=32) model.train().to(device) for i in range(4): optimizer.zero_grad() ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5)) self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_ema.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from diffusers import UNet2DConditionModel from diffusers.training_utils import EMAModel from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device enable_full_determinism() class EMAModelTests(unittest.TestCase): model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" batch_size = 1 prompt_length = 77 text_encoder_hidden_dim = 32 num_in_channels = 4 latent_height = latent_width = 64 generator = torch.manual_seed(0) def get_models(self, decay=0.9999): unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") unet = unet.to(torch_device) ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) return unet, ema_unet def get_dummy_inputs(self): noisy_latents = torch.randn( self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator ).to(torch_device) timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) encoder_hidden_states = torch.randn( self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator ).to(torch_device) return noisy_latents, timesteps, encoder_hidden_states def simulate_backprop(self, unet): updated_state_dict = {} for k, param in unet.state_dict().items(): updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) updated_state_dict.update({k: updated_param}) unet.load_state_dict(updated_state_dict) return unet def test_optimization_steps_updated(self): unet, ema_unet = self.get_models() # Take the first (hypothetical) EMA step. ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 1 # Take two more. for _ in range(2): ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 3 def test_shadow_params_not_updated(self): unet, ema_unet = self.get_models() # Since the `unet` is not being updated (i.e., backprop'd) # there won't be any difference between the `params` of `unet` # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. ema_unet.step(unet.parameters()) orig_params = list(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) # The above holds true even if we call `ema.step()` multiple times since # `unet` params are still not being updated. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) def test_shadow_params_updated(self): unet, ema_unet = self.get_models() # Here we simulate the parameter updates for `unet`. Since there might # be some parameters which are initialized to zero we take extra care to # initialize their values to something non-zero before the multiplication. unet_pseudo_updated_step_one = self.simulate_backprop(unet) # Take the EMA step. ema_unet.step(unet_pseudo_updated_step_one.parameters()) # Now the EMA'd parameters won't be equal to the original model parameters. orig_params = list(unet_pseudo_updated_step_one.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) # Ensure this is the case when we take multiple EMA steps. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) def test_consecutive_shadow_params_updated(self): # If we call EMA step after a backpropagation consecutively for two times, # the shadow params from those two steps should be different. unet, ema_unet = self.get_models() # First backprop + EMA unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params # Second backprop + EMA unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert ~torch.allclose(step_one, step_two) def test_zero_decay(self): # If there's no decay even if there are backprops, EMA steps # won't take any effect i.e., the shadow params would remain the # same. unet, ema_unet = self.get_models(decay=0.0) unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert torch.allclose(step_one, step_two) @skip_mps def test_serialization(self): unet, ema_unet = self.get_models() noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) loaded_unet = loaded_unet.to(unet.device) # Since no EMA step has been performed the outputs should match. output = unet(noisy_latents, timesteps, encoder_hidden_states).sample output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample assert torch.allclose(output, output_loaded, atol=1e-4)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_check_copies.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import shutil import sys import tempfile import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. REFERENCE_CODE = """ \""" Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None """ class CopyCheckTester(unittest.TestCase): def setUp(self): self.diffusers_dir = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir, "schedulers/")) check_copies.DIFFUSERS_PATH = self.diffusers_dir shutil.copy( os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), ) def tearDown(self): check_copies.DIFFUSERS_PATH = "src/diffusers" shutil.rmtree(self.diffusers_dir) def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None): code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result code = check_copies.run_ruff(code) fname = os.path.join(self.diffusers_dir, "new_code.py") with open(fname, "w", newline="\n") as f: f.write(code) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0) else: check_copies.is_copy_consistent(f.name, overwrite=True) with open(fname, "r") as f: self.assertTrue(f.read(), expected) def test_find_code_in_diffusers(self): code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput") self.assertEqual(code, REFERENCE_CODE) def test_is_copy_consistent(self): # Base copy consistency self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", ) # With no empty line at the end self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, ) # Copy consistency with rename self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), ) # Copy consistency with a really long name long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE), ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/others/test_dependencies.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from importlib import import_module class DependencyTester(unittest.TestCase): def test_diffusers_import(self): try: import diffusers # noqa: F401 except ImportError: assert False def test_backend_registration(self): import diffusers from diffusers.dependency_versions_table import deps all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": backend = "k-diffusion" elif backend == "invisible_watermark": backend = "invisible-watermark" assert backend in deps, f"{backend} is not in the deps table!" def test_pipeline_imports(self): import diffusers import diffusers.pipelines all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if hasattr(diffusers.pipelines, cls_name): pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3]) _ = import_module(pipeline_folder_module, str(cls_name))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/pipeline_params.py
# These are canonical sets of parameters for different types of pipelines. # They are set on subclasses of `PipelineTesterMixin` as `params` and # `batch_params`. # # If a pipeline's set of arguments has minor changes from one of the common sets # of arguments, do not make modifications to the existing common sets of arguments. # I.e. a text to image pipeline with non-configurable height and width arguments # should set its attribute as `params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. TEXT_TO_IMAGE_PARAMS = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"]) TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([]) IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"]) IMAGE_VARIATION_PARAMS = frozenset( [ "image", "height", "width", "guidance_scale", ] ) IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"]) TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"]) TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) IMAGE_INPAINTING_PARAMS = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"]) IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"]) CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"]) CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"]) UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"]) UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([]) UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"]) UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([]) TEXT_TO_AUDIO_PARAMS = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"]) TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"]) TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"]) TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS = frozenset(["prompt_embeds"])
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from huggingface_hub import ModelCard from diffusers import ( DDPMScheduler, DiffusionPipeline, KandinskyV22CombinedPipeline, KandinskyV22Pipeline, KandinskyV22PriorPipeline, ) from diffusers.pipelines.pipeline_utils import CONNECTED_PIPES_KEYS def state_dicts_almost_equal(sd1, sd2): sd1 = dict(sorted(sd1.items())) sd2 = dict(sorted(sd2.items())) models_are_equal = True for ten1, ten2 in zip(sd1.values(), sd2.values()): if (ten1 - ten2).abs().sum() > 1e-3: models_are_equal = False return models_are_equal class CombinedPipelineFastTest(unittest.TestCase): def modelcard_has_connected_pipeline(self, model_id): modelcard = ModelCard.load(model_id) connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} connected_pipes = {k: v for k, v in connected_pipes.items() if v is not None} return len(connected_pipes) > 0 def test_correct_modelcard_format(self): # hf-internal-testing/tiny-random-kandinsky-v22-prior has no metadata assert not self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-prior") # see https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L2 assert self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-decoder") def test_load_connected_checkpoint_when_specified(self): pipeline_prior = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior") pipeline_prior_connected = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-random-kandinsky-v22-prior", load_connected_pipeline=True ) # Passing `load_connected_pipeline` to prior is a no-op as the pipeline has no connected pipeline assert pipeline_prior.__class__ == pipeline_prior_connected.__class__ pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder") pipeline_connected = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-random-kandinsky-v22-decoder", load_connected_pipeline=True ) # Passing `load_connected_pipeline` to decoder loads the combined pipeline assert pipeline.__class__ != pipeline_connected.__class__ assert pipeline.__class__ == KandinskyV22Pipeline assert pipeline_connected.__class__ == KandinskyV22CombinedPipeline # check that loaded components match prior and decoder components assert set(pipeline_connected.components.keys()) == set( ["prior_" + k for k in pipeline_prior.components.keys()] + list(pipeline.components.keys()) ) def test_load_connected_checkpoint_default(self): prior = KandinskyV22PriorPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior") decoder = KandinskyV22Pipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder") # check that combined pipeline loads both prior & decoder because of # https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L3 assert ( KandinskyV22CombinedPipeline._load_connected_pipes ) # combined pipelines will download more checkpoints that just the one specified pipeline = KandinskyV22CombinedPipeline.from_pretrained( "hf-internal-testing/tiny-random-kandinsky-v22-decoder" ) prior_comps = prior.components decoder_comps = decoder.components for k, component in pipeline.components.items(): if k.startswith("prior_"): k = k[6:] comp = prior_comps[k] else: comp = decoder_comps[k] if isinstance(component, torch.nn.Module): assert state_dicts_almost_equal(component.state_dict(), comp.state_dict()) elif hasattr(component, "config"): assert dict(component.config) == dict(comp.config) else: assert component.__class__ == comp.__class__ def test_load_connected_checkpoint_with_passed_obj(self): pipeline = KandinskyV22CombinedPipeline.from_pretrained( "hf-internal-testing/tiny-random-kandinsky-v22-decoder" ) prior_scheduler = DDPMScheduler.from_config(pipeline.prior_scheduler.config) scheduler = DDPMScheduler.from_config(pipeline.scheduler.config) # make sure we pass a different scheduler and prior_scheduler assert pipeline.prior_scheduler.__class__ != prior_scheduler.__class__ assert pipeline.scheduler.__class__ != scheduler.__class__ pipeline_new = KandinskyV22CombinedPipeline.from_pretrained( "hf-internal-testing/tiny-random-kandinsky-v22-decoder", prior_scheduler=prior_scheduler, scheduler=scheduler, ) assert dict(pipeline_new.prior_scheduler.config) == dict(prior_scheduler.config) assert dict(pipeline_new.scheduler.config) == dict(scheduler.config)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_auto.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import shutil import unittest from collections import OrderedDict from pathlib import Path import torch from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ControlNetModel, DiffusionPipeline, ) from diffusers.pipelines.auto_pipeline import ( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING, ) from diffusers.utils.testing_utils import slow PRETRAINED_MODEL_REPO_MAPPING = OrderedDict( [ ("stable-diffusion", "runwayml/stable-diffusion-v1-5"), ("if", "DeepFloyd/IF-I-XL-v1.0"), ("kandinsky", "kandinsky-community/kandinsky-2-1"), ("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"), ] ) class AutoPipelineFastTest(unittest.TestCase): def test_from_pipe_consistent(self): pipe = AutoPipelineForText2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False ) original_config = dict(pipe.config) pipe = AutoPipelineForImage2Image.from_pipe(pipe) assert dict(pipe.config) == original_config pipe = AutoPipelineForText2Image.from_pipe(pipe) assert dict(pipe.config) == original_config def test_from_pipe_override(self): pipe = AutoPipelineForText2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False ) pipe = AutoPipelineForImage2Image.from_pipe(pipe, requires_safety_checker=True) assert pipe.config.requires_safety_checker is True pipe = AutoPipelineForText2Image.from_pipe(pipe, requires_safety_checker=True) assert pipe.config.requires_safety_checker is True def test_from_pipe_consistent_sdxl(self): pipe = AutoPipelineForImage2Image.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-xl-pipe", requires_aesthetics_score=True, force_zeros_for_empty_prompt=False, ) original_config = dict(pipe.config) pipe = AutoPipelineForText2Image.from_pipe(pipe) pipe = AutoPipelineForImage2Image.from_pipe(pipe) assert dict(pipe.config) == original_config def test_kwargs_local_files_only(self): repo = "hf-internal-testing/tiny-stable-diffusion-torch" tmpdirname = DiffusionPipeline.download(repo) tmpdirname = Path(tmpdirname) # edit commit_id to so that it's not the latest commit commit_id = tmpdirname.name new_commit_id = commit_id + "hug" ref_dir = tmpdirname.parent.parent / "refs/main" with open(ref_dir, "w") as f: f.write(new_commit_id) new_tmpdirname = tmpdirname.parent / new_commit_id os.rename(tmpdirname, new_tmpdirname) try: AutoPipelineForText2Image.from_pretrained(repo, local_files_only=True) except OSError: assert False, "not able to load local files" shutil.rmtree(tmpdirname.parent.parent) def test_from_pipe_controlnet_text2img(self): pipe = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet) assert pipe.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=controlnet) assert pipe.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionImg2ImgPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_inpaint(self): pipe = AutoPipelineForInpainting.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=controlnet) assert pipe.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe.components pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=None) assert pipe.__class__.__name__ == "StableDiffusionInpaintPipeline" assert "controlnet" not in pipe.components def test_from_pipe_controlnet_new_task(self): pipe_text2img = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_text2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=None) assert pipe_inpaint.__class__.__name__ == "StableDiffusionInpaintPipeline" assert "controlnet" not in pipe_inpaint.components # testing `from_pipe` for text2img controlnet ## 1. from a different controlnet pipe, without controlnet argument pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components ## 2. from a different controlnet pipe, with controlnet argument pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components ## 3. from same controlnet pipeline class, with a different controlnet component pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" assert "controlnet" in pipe_control_text2img.components # testing from_pipe for inpainting ## 1. from a different controlnet pipeline class pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components ## from a different controlnet pipe, with a different controlnet pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components ## from same controlnet pipe, with a different controlnet pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_inpaint, controlnet=controlnet) assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" assert "controlnet" in pipe_control_inpaint.components # testing from_pipe from img2img controlnet ## from a different controlnet pipe, without controlnet argument pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components # from a different controlnet pipe, with a different controlnet component pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components # from same controlnet pipeline class, with a different controlnet pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" assert "controlnet" in pipe_control_img2img.components @slow class AutoPipelineIntegrationTest(unittest.TestCase): def test_pipe_auto(self): for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): # test txt2img pipe_txt2img = AutoPipelineForText2Image.from_pretrained( model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) if "kandinsky" not in model_name: pipe_to = AutoPipelineForInpainting.from_pipe(pipe_txt2img) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_txt2img, pipe_to gc.collect() # test img2img pipe_img2img = AutoPipelineForImage2Image.from_pretrained( model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) if "kandinsky" not in model_name: pipe_to = AutoPipelineForInpainting.from_pipe(pipe_img2img) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_img2img, pipe_to gc.collect() # test inpaint if "kandinsky" not in model_name: pipe_inpaint = AutoPipelineForInpainting.from_pretrained( model_repo, variant="fp16", torch_dtype=torch.float16 ) self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForText2Image.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) pipe_to = AutoPipelineForInpainting.from_pipe(pipe_inpaint) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) del pipe_inpaint, pipe_to gc.collect() def test_from_pipe_consistent(self): for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): if model_name in ["kandinsky", "kandinsky22"]: auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image] else: auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoPipelineForInpainting] # test from_pretrained for pipe_from_class in auto_pipes: pipe_from = pipe_from_class.from_pretrained(model_repo, variant="fp16", torch_dtype=torch.float16) pipe_from_config = dict(pipe_from.config) for pipe_to_class in auto_pipes: pipe_to = pipe_to_class.from_pipe(pipe_from) self.assertEqual(dict(pipe_to.config), pipe_from_config) del pipe_from, pipe_to gc.collect() def test_controlnet(self): # test from_pretrained model_repo = "runwayml/stable-diffusion-v1-5" controlnet_repo = "lllyasviel/sd-controlnet-canny" controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16) pipe_txt2img = AutoPipelineForText2Image.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) pipe_img2img = AutoPipelineForImage2Image.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) pipe_inpaint = AutoPipelineForInpainting.from_pretrained( model_repo, controlnet=controlnet, torch_dtype=torch.float16 ) self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) # test from_pipe for pipe_from in [pipe_txt2img, pipe_img2img, pipe_inpaint]: pipe_to = AutoPipelineForText2Image.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_txt2img.config)) pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_img2img.config)) pipe_to = AutoPipelineForInpainting.from_pipe(pipe_from) self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) self.assertEqual(dict(pipe_to.config), dict(pipe_inpaint.config))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_common.py
import contextlib import gc import inspect import io import json import os import re import tempfile import unittest import uuid from typing import Callable, Union import numpy as np import PIL.Image import torch from huggingface_hub import delete_repo from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, DDIMScheduler, DiffusionPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import logging from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, require_torch, torch_device, ) from ..models.test_models_vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..others.test_utils import TOKEN, USER, is_staging_test def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class PipelineLatentTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that has vae, e.g. equivalence of different input and output types, etc. """ @property def image_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_params` in the child test class. " "`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results" ) @property def image_latents_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_latents_params` in the child test class. " "`image_latents_params` are tested for if passing latents directly are producing same results" ) def get_dummy_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inputs(device, seed) def convert_to_pt(image): if isinstance(image, torch.Tensor): input_image = image elif isinstance(image, np.ndarray): input_image = VaeImageProcessor.numpy_to_pt(image) elif isinstance(image, PIL.Image.Image): input_image = VaeImageProcessor.pil_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pt(input_image) else: raise ValueError(f"unsupported input_image_type {type(image)}") return input_image def convert_pt_to_type(image, input_image_type): if input_image_type == "pt": input_image = image elif input_image_type == "np": input_image = VaeImageProcessor.pt_to_numpy(image) elif input_image_type == "pil": input_image = VaeImageProcessor.pt_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pil(input_image) else: raise ValueError(f"unsupported input_image_type {input_image_type}.") return input_image for image_param in self.image_params: if image_param in inputs.keys(): inputs[image_param] = convert_pt_to_type( convert_to_pt(inputs[image_param]).to(device), input_image_type ) inputs["output_type"] = output_type return inputs def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4): self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff) def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type="pt"): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output_pt = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pt") )[0] output_np = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="np") )[0] output_pil = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pil") )[0] max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess( max_diff, expected_max_diff, "`output_type=='pt'` generate different results from `output_type=='np'`" ) max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_pt_np_pil_inputs_equivalent(self): if len(self.image_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pil"))[0] max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`") max_diff = np.abs(out_input_pil - out_input_np).max() self.assertLess(max_diff, 1e-2, "`input_type=='pt'` generate different result from `input_type=='np'`") def test_latents_input(self): if len(self.image_latents_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] vae = components["vae"] inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") generator = inputs["generator"] for image_param in self.image_latents_params: if image_param in inputs.keys(): inputs[image_param] = ( vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor ) out_latents_inputs = pipe(**inputs)[0] max_diff = np.abs(out - out_latents_inputs).max() self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") def test_multi_vae(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) block_out_channels = pipe.vae.config.block_out_channels norm_num_groups = pipe.vae.config.norm_num_groups vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny] configs = [ get_autoencoder_kl_config(block_out_channels, norm_num_groups), get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups), get_consistency_vae_config(block_out_channels, norm_num_groups), get_autoencoder_tiny_config(block_out_channels), ] out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] for vae_cls, config in zip(vae_classes, configs): vae = vae_cls(**config) vae = vae.to(torch_device) components["vae"] = vae vae_pipe = self.pipeline_class(**components) out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] assert out_vae_np.shape == out_np.shape @require_torch class PipelineKarrasSchedulerTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers equivalence of dict and tuple outputs, etc. """ def test_karras_schedulers_shape(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 if "strength" in inputs: inputs["num_inference_steps"] = 4 inputs["strength"] = 0.5 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = 5 scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) if "KDPM2" in scheduler_enum.name: inputs["num_inference_steps"] = 2 assert check_same_shape(outputs) @require_torch class PipelineTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline, equivalence of dict and tuple outputs, etc. """ # Canonical parameters that are passed to `__call__` regardless # of the type of pipeline. They are always optional and have common # sense default values. required_optional_params = frozenset( [ "num_inference_steps", "num_images_per_prompt", "generator", "latents", "output_type", "return_dict", ] ) # set these parameters to False in the child class if the pipeline does not support the corresponding functionality test_attention_slicing = True test_xformers_attention = True def get_generator(self, seed): device = torch_device if torch_device != "mps" else "cpu" generator = torch.Generator(device).manual_seed(seed) return generator @property def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: raise NotImplementedError( "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_components(self): raise NotImplementedError( "You need to implement `get_dummy_components(self)` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_inputs(self, device, seed=0): raise NotImplementedError( "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " "See existing pipeline tests for reference." ) @property def params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `params` in the child test class. " "`params` are checked for if all values are present in `__call__`'s signature." " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " "image pipelines, including prompts and prompt embedding overrides." "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " "with non-configurable height and width arguments should set the attribute as " "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " "See existing pipeline tests for reference." ) @property def batch_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `batch_params` in the child test class. " "`batch_params` are the parameters required to be batched when passed to the pipeline's " "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " "set of batch arguments has minor changes from one of the common sets of batch arguments, " "do not make modifications to the existing common sets of batch arguments. I.e. a text to " "image pipeline `negative_prompt` is not batched should set the attribute as " "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " "See existing pipeline tests for reference." ) @property def callback_cfg_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. " "`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback " "function when dynamically adjusting `guidance_scale`. They are variables that require special" "treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common" " sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's " "set of cfg arguments has minor changes from one of the common sets of cfg arguments, " "do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeine, you " " need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as" "`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`" ) def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() gc.collect() torch.cuda.empty_cache() def test_save_load_local(self, expected_max_difference=5e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_pipeline_call_signature(self): self.assertTrue( hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method" ) parameters = inspect.signature(self.pipeline_class.__call__).parameters optional_parameters = set() for k, v in parameters.items(): if v.default != inspect._empty: optional_parameters.add(k) parameters = set(parameters.keys()) parameters.remove("self") parameters.discard("kwargs") # kwargs can be added if arguments of pipeline call function are deprecated remaining_required_parameters = set() for param in self.params: if param not in parameters: remaining_required_parameters.add(param) self.assertTrue( len(remaining_required_parameters) == 0, f"Required parameters not present: {remaining_required_parameters}", ) remaining_required_optional_parameters = set() for param in self.required_optional_params: if param not in optional_parameters: remaining_required_optional_parameters.add(param) self.assertTrue( len(remaining_required_optional_parameters) == 0, f"Required optional parameters not present: {remaining_required_optional_parameters}", ) def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes) def _test_inference_batch_consistent( self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_input[name][-1] = 100 * "very long" else: batched_input[name] = batch_size * [value] if "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input) assert len(output[0]) == batch_size def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4): self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff) def _test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" output = pipe(**self.get_dummy_inputs(generator_device))[0] output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) def test_components_function(self): init_components = self.get_dummy_components() init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(0) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: fp16_inputs["generator"] = self.get_generator(0) output_fp16 = pipe_fp16(**fp16_inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3): self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff) def _test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing = pipe(**inputs)[0] if test_max_difference: max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max() self.assertLess(max_diff, expected_max_diff, "Attention slicing should not affect the inference results") if test_mean_pixel_difference: assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0])) @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass() def _test_xformers_attention_forwardGenerator_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4 ): if not self.test_xformers_attention: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) if test_max_difference: max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results") if test_mean_pixel_difference: assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0]) def test_progress_bar(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) ", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") def test_num_images_per_prompt(self): sig = inspect.signature(self.pipeline_class.__call__) if "num_images_per_prompt" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["guidance_scale"] = 1.0 out_no_cfg = pipe(**inputs)[0] inputs["guidance_scale"] = 7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # interate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # interate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] inputs["output_type"] = "latent" output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def test_callback_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_increase_guidance(pipe, i, t, callback_kwargs): pipe._guidance_scale += 1.0 return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # use cfg guidance because some pipelines modify the shape of the latents # outside of the denoising loop inputs["guidance_scale"] = 2.0 inputs["callback_on_step_end"] = callback_increase_guidance inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs _ = pipe(**inputs)[0] # we increase the guidance scale by 1.0 at every step # check that the guidance scale is increased by the number of scheduler timesteps # accounts for models that modify the number of inference steps based on strength assert pipe.guidance_scale == (inputs["guidance_scale"] + pipe.num_timesteps) @is_staging_test class PipelinePushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-pipeline-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def get_pipeline_components(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) with tempfile.TemporaryDirectory() as tmpdir: dummy_vocab = {"<|startoftext|>": 0, "<|endoftext|>": 1, "!": 2} vocab_path = os.path.join(tmpdir, "vocab.json") with open(vocab_path, "w") as f: json.dump(dummy_vocab, f) merges = "Ġ t\nĠt h" merges_path = os.path.join(tmpdir, "merges.txt") with open(merges_path, "w") as f: f.writelines(merges) tokenizer = CLIPTokenizer(vocab_file=vocab_path, merges_file=merges_path) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def test_push_to_hub(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): components = self.get_pipeline_components() pipeline = StableDiffusionPipeline(**components) pipeline.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") unet = components["unet"] for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: pipeline.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") for p1, p2 in zip(unet.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) # For SDXL and its derivative pipelines (such as ControlNet), we have the text encoders # and the tokenizers as optional components. So, we need to override the `test_save_load_optional_components()` # test for all such pipelines. This requires us to use a custom `encode_prompt()` function. class SDXLOptionalComponentsTesterMixin: def encode_prompt( self, tokenizers, text_encoders, prompt: str, num_images_per_prompt: int = 1, negative_prompt: str = None ): device = text_encoders[0].device if isinstance(prompt, str): prompt = [prompt] batch_size = len(prompt) prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) if negative_prompt is None: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) else: negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_embeds_list = [] for tokenizer, text_encoder in zip(tokenizers, text_encoders): uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # for classifier-free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) # for classifier-free guidance negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds def _test_save_load_optional_components(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) tokenizer = components.pop("tokenizer") tokenizer_2 = components.pop("tokenizer_2") text_encoder = components.pop("text_encoder") text_encoder_2 = components.pop("text_encoder_2") tokenizers = [tokenizer, tokenizer_2] if tokenizer is not None else [tokenizer_2] text_encoders = [text_encoder, text_encoder_2] if text_encoder is not None else [text_encoder_2] prompt = inputs.pop("prompt") ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt(tokenizers, text_encoders, prompt) inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) _ = inputs.pop("prompt") inputs["prompt_embeds"] = prompt_embeds inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["pooled_prompt_embeds"] = pooled_prompt_embeds inputs["negative_pooled_prompt_embeds"] = negative_pooled_prompt_embeds output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) # Some models (e.g. unCLIP) are extremely likely to significantly deviate depending on which hardware is used. # This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a # reference image. def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10): image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32) expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32) avg_diff = np.abs(image - expected_image).mean() assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import json import os import random import shutil import sys import tempfile import traceback import unittest import unittest.mock as mock import numpy as np import PIL.Image import requests_mock import safetensors.torch import torch from parameterized import parameterized from PIL import Image from requests.exceptions import HTTPError from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ConfigMixin, DDIMPipeline, DDIMScheduler, DDPMPipeline, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, ModelMixin, PNDMScheduler, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionPipeline, UNet2DConditionModel, UNet2DModel, UniPCMultistepScheduler, logging, ) from diffusers.pipelines.pipeline_utils import _get_pipeline_class from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import ( CONFIG_NAME, WEIGHTS_NAME, ) from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, get_tests_dir, load_numpy, nightly, require_compel, require_flax, require_onnxruntime, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import is_compiled_module enable_full_determinism() # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: # 1. Load models model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) model = torch.compile(model) scheduler = DDPMScheduler(num_train_timesteps=10) ddpm = DDPMPipeline(model, scheduler) # previous diffusers versions stripped compilation off # compiled modules assert is_compiled_module(ddpm.unet) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: ddpm.save_pretrained(tmpdirname) new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) new_ddpm.to(torch_device) generator = torch.Generator(device=torch_device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(0) new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class CustomEncoder(ModelMixin, ConfigMixin): def __init__(self): super().__init__() class CustomPipeline(DiffusionPipeline): def __init__(self, encoder: CustomEncoder, scheduler: DDIMScheduler): super().__init__() self.register_modules(encoder=encoder, scheduler=scheduler) class DownloadTests(unittest.TestCase): def test_one_request_upon_cached(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. if torch_device == "mps": return with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe", cache_dir=tmpdirname) download_requests = [r.method for r in m.request_history] assert download_requests.count("HEAD") == 15, "15 calls to files" assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json" assert ( len(download_requests) == 32 ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json" with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) cache_requests = [r.method for r in m.request_history] assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD" assert cache_requests.count("GET") == 1, "model info is only GET" assert ( len(cache_requests) == 2 ), "We should call only `model_info` to check for _commit hash and `send_telemetry`" def test_less_downloads_passed_object(self): with tempfile.TemporaryDirectory() as tmpdirname: cached_folder = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) # make sure safety checker is not downloaded assert "safety_checker" not in os.listdir(cached_folder) # make sure rest is downloaded assert "unet" in os.listdir(cached_folder) assert "tokenizer" in os.listdir(cached_folder) assert "vae" in os.listdir(cached_folder) assert "model_index.json" in os.listdir(cached_folder) assert "scheduler" in os.listdir(cached_folder) assert "feature_extractor" in os.listdir(cached_folder) def test_less_downloads_passed_object_calls(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. if torch_device == "mps": return with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) download_requests = [r.method for r in m.request_history] # 15 - 2 because no call to config or model file for `safety_checker` assert download_requests.count("HEAD") == 13, "13 calls to files" # 17 - 2 because no call to config or model file for `safety_checker` assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json" assert ( len(download_requests) == 28 ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json" with requests_mock.mock(real_http=True) as m: DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) cache_requests = [r.method for r in m.request_history] assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD" assert cache_requests.count("GET") == 1, "model info is only GET" assert ( len(cache_requests) == 2 ), "We should call only `model_info` to check for _commit hash and `send_telemetry`" def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a flax file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack assert not any(f.endswith(".msgpack") for f in files) # We need to never convert this tiny model to safetensors for this test to pass assert not any(f.endswith(".safetensors") for f in files) def test_force_safetensors_error(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights with self.assertRaises(EnvironmentError): tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors", safety_checker=None, cache_dir=tmpdirname, use_safetensors=True, ) def test_download_safetensors(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors", safety_checker=None, cache_dir=tmpdirname, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a pytorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack assert not any(f.endswith(".bin") for f in files) def test_download_safetensors_index(self): for variant in ["fp16", None]: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", cache_dir=tmpdirname, use_safetensors=True, variant=variant, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a safetensors file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder if variant is None: assert not any("fp16" in f for f in files) else: model_files = [f for f in files if "safetensors" in f] assert all("fp16" in f for f in model_files) assert len([f for f in files if ".safetensors" in f]) == 8 assert not any(".bin" in f for f in files) def test_download_bin_index(self): for variant in ["fp16", None]: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", cache_dir=tmpdirname, use_safetensors=False, variant=variant, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a safetensors file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder if variant is None: assert not any("fp16" in f for f in files) else: model_files = [f for f in files if "bin" in f] assert all("fp16" in f for f in model_files) assert len([f for f in files if ".bin" in f]) == 8 assert not any(".safetensors" in f for f in files) def test_download_no_openvino_by_default(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-open-vino", cache_dir=tmpdirname, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default no openvino weights are downloaded assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert not any("openvino_" in f for f in files) def test_download_no_onnx_by_default(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-stable-diffusion-xl-pipe", cache_dir=tmpdirname, use_safetensors=False, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default no onnx weights are downloaded for non-ONNX pipelines assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert not any((f.endswith(".onnx") or f.endswith(".pb")) for f in files) @require_onnxruntime def test_download_onnx_by_default_for_onnx_pipelines(self): with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = DiffusionPipeline.download( "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", cache_dir=tmpdirname, ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # make sure that by default onnx weights are downloaded for ONNX pipelines assert any((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) assert any((f.endswith(".onnx")) for f in files) assert any((f.endswith(".pb")) for f in files) def test_download_no_safety_checker(self): prompt = "hello" pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe = pipe.to(torch_device) generator = torch.manual_seed(0) out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") pipe_2 = pipe_2.to(torch_device) generator = torch.manual_seed(0) out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images assert np.max(np.abs(out - out_2)) < 1e-3 def test_load_no_safety_checker_explicit_locally(self): prompt = "hello" pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe = pipe.to(torch_device) generator = torch.manual_seed(0) out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None) pipe_2 = pipe_2.to(torch_device) generator = torch.manual_seed(0) out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images assert np.max(np.abs(out - out_2)) < 1e-3 def test_load_no_safety_checker_default_locally(self): prompt = "hello" pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") pipe = pipe.to(torch_device) generator = torch.manual_seed(0) out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname) pipe_2 = pipe_2.to(torch_device) generator = torch.manual_seed(0) out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images assert np.max(np.abs(out - out_2)) < 1e-3 def test_cached_files_are_used_when_no_internet(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. orig_pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")} # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.request", return_value=response_mock): # Download this model to make sure it's in the cache. pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")} for m1, m2 in zip(orig_comps.values(), comps.values()): for p1, p2 in zip(m1.parameters(), m2.parameters()): if p1.data.ne(p2.data).sum() > 0: assert False, "Parameters not the same!" def test_local_files_only_are_used_when_no_internet(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # first check that with local files only the pipeline can only be used if cached with self.assertRaises(FileNotFoundError): with tempfile.TemporaryDirectory() as tmpdirname: orig_pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname ) # now download orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch") # make sure it can be loaded with local_files_only orig_pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True ) orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")} # Under the mock environment we get a 500 error when trying to connect to the internet. # Make sure it works local_files_only only works here! with mock.patch("requests.request", return_value=response_mock): # Download this model to make sure it's in the cache. pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")} for m1, m2 in zip(orig_comps.values(), comps.values()): for p1, p2 in zip(m1.parameters(), m2.parameters()): if p1.data.ne(p2.data).sum() > 0: assert False, "Parameters not the same!" def test_download_from_variant_folder(self): for use_safetensors in [False, True]: other_format = ".bin" if use_safetensors else ".safetensors" with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = StableDiffusionPipeline.download( "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) all_root_files = [t[-1] for t in os.walk(tmpdirname)] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a variant file even if we have some here: # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet assert len(files) == 15, f"We should only download 15 files, not {len(files)}" assert not any(f.endswith(other_format) for f in files) # no variants assert not any(len(f.split(".")) == 3 for f in files) def test_download_variant_all(self): for use_safetensors in [False, True]: other_format = ".bin" if use_safetensors else ".safetensors" this_format = ".safetensors" if use_safetensors else ".bin" variant = "fp16" with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = StableDiffusionPipeline.download( "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant, use_safetensors=use_safetensors, ) all_root_files = [t[-1] for t in os.walk(tmpdirname)] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a non-variant file even if we have some here: # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet assert len(files) == 15, f"We should only download 15 files, not {len(files)}" # unet, vae, text_encoder, safety_checker assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4 # all checkpoints should have variant ending assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) assert not any(f.endswith(other_format) for f in files) def test_download_variant_partly(self): for use_safetensors in [False, True]: other_format = ".bin" if use_safetensors else ".safetensors" this_format = ".safetensors" if use_safetensors else ".bin" variant = "no_ema" with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = StableDiffusionPipeline.download( "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant, use_safetensors=use_safetensors, ) all_root_files = [t[-1] for t in os.walk(tmpdirname)] files = [item for sublist in all_root_files for item in sublist] unet_files = os.listdir(os.path.join(tmpdirname, "unet")) # Some of the downloaded files should be a non-variant file, check: # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet assert len(files) == 15, f"We should only download 15 files, not {len(files)}" # only unet has "no_ema" variant assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1 # vae, safety_checker and text_encoder should have no variant assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3 assert not any(f.endswith(other_format) for f in files) def test_download_broken_variant(self): for use_safetensors in [False, True]: # text encoder is missing no variant and "no_ema" variant weights, so the following can't work for variant in [None, "no_ema"]: with self.assertRaises(OSError) as error_context: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/stable-diffusion-broken-variants", cache_dir=tmpdirname, variant=variant, use_safetensors=use_safetensors, ) assert "Error no file name" in str(error_context.exception) # text encoder has fp16 variants so we can load it with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = StableDiffusionPipeline.download( "hf-internal-testing/stable-diffusion-broken-variants", use_safetensors=use_safetensors, cache_dir=tmpdirname, variant="fp16", ) all_root_files = [t[-1] for t in os.walk(tmpdirname)] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a non-variant file even if we have some here: # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet assert len(files) == 15, f"We should only download 15 files, not {len(files)}" # only unet has "no_ema" variant def test_local_save_load_index(self): prompt = "hello" for variant in [None, "fp16"]: for use_safe in [True, False]: pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", variant=variant, use_safetensors=use_safe, safety_checker=None, ) pipe = pipe.to(torch_device) generator = torch.manual_seed(0) out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe_2 = StableDiffusionPipeline.from_pretrained( tmpdirname, safe_serialization=use_safe, variant=variant ) pipe_2 = pipe_2.to(torch_device) generator = torch.manual_seed(0) out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images assert np.max(np.abs(out - out_2)) < 1e-3 def test_text_inversion_download(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe = pipe.to(torch_device) num_tokens = len(pipe.tokenizer) # single token load local with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<*>": torch.ones((32,))} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname) token = pipe.tokenizer.convert_tokens_to_ids("<*>") assert token == num_tokens, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>" prompt = "hey <*>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # single token load local with weight name with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<**>": 2 * torch.ones((1, 32))} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin") token = pipe.tokenizer.convert_tokens_to_ids("<**>") assert token == num_tokens + 1, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>" prompt = "hey <**>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi token load with tempfile.TemporaryDirectory() as tmpdirname: ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])} torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) pipe.load_textual_inversion(tmpdirname) token = pipe.tokenizer.convert_tokens_to_ids("<***>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2") assert token == num_tokens + 2, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2" prompt = "hey <***>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi token load a1111 with tempfile.TemporaryDirectory() as tmpdirname: ten = { "string_to_param": { "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) }, "name": "<****>", } torch.save(ten, os.path.join(tmpdirname, "a1111.bin")) pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin") token = pipe.tokenizer.convert_tokens_to_ids("<****>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2") assert token == num_tokens + 5, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2" prompt = "hey <****>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi embedding load with tempfile.TemporaryDirectory() as tmpdirname1: with tempfile.TemporaryDirectory() as tmpdirname2: ten = {"<*****>": torch.ones((32,))} torch.save(ten, os.path.join(tmpdirname1, "learned_embeds.bin")) ten = {"<******>": 2 * torch.ones((1, 32))} torch.save(ten, os.path.join(tmpdirname2, "learned_embeds.bin")) pipe.load_textual_inversion([tmpdirname1, tmpdirname2]) token = pipe.tokenizer.convert_tokens_to_ids("<*****>") assert token == num_tokens + 8, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 assert pipe._maybe_convert_prompt("<*****>", pipe.tokenizer) == "<*****>" token = pipe.tokenizer.convert_tokens_to_ids("<******>") assert token == num_tokens + 9, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>" prompt = "hey <*****> <******>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # single token state dict load ten = {"<x>": torch.ones((32,))} pipe.load_textual_inversion(ten) token = pipe.tokenizer.convert_tokens_to_ids("<x>") assert token == num_tokens + 10, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>" prompt = "hey <x>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multi embedding state dict load ten1 = {"<xxxxx>": torch.ones((32,))} ten2 = {"<xxxxxx>": 2 * torch.ones((1, 32))} pipe.load_textual_inversion([ten1, ten2]) token = pipe.tokenizer.convert_tokens_to_ids("<xxxxx>") assert token == num_tokens + 11, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 assert pipe._maybe_convert_prompt("<xxxxx>", pipe.tokenizer) == "<xxxxx>" token = pipe.tokenizer.convert_tokens_to_ids("<xxxxxx>") assert token == num_tokens + 12, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>" prompt = "hey <xxxxx> <xxxxxx>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # auto1111 multi-token state dict load ten = { "string_to_param": { "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) }, "name": "<xxxx>", } pipe.load_textual_inversion(ten) token = pipe.tokenizer.convert_tokens_to_ids("<xxxx>") token_1 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_1") token_2 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_2") assert token == num_tokens + 13, "Added token must be at spot `num_tokens`" assert token_1 == num_tokens + 14, "Added token must be at spot `num_tokens`" assert token_2 == num_tokens + 15, "Added token must be at spot `num_tokens`" assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2" prompt = "hey <xxxx>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) # multiple references to multi embedding ten = {"<cat>": torch.ones(3, 32)} pipe.load_textual_inversion(ten) assert ( pipe._maybe_convert_prompt("<cat> <cat>", pipe.tokenizer) == "<cat> <cat>_1 <cat>_2 <cat> <cat>_1 <cat>_2" ) prompt = "hey <cat> <cat>" out = pipe(prompt, num_inference_steps=1, output_type="numpy").images assert out.shape == (1, 128, 128, 3) def test_text_inversion_multi_tokens(self): pipe1 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe1 = pipe1.to(torch_device) token1, token2 = "<*>", "<**>" ten1 = torch.ones((32,)) ten2 = torch.ones((32,)) * 2 num_tokens = len(pipe1.tokenizer) pipe1.load_textual_inversion(ten1, token=token1) pipe1.load_textual_inversion(ten2, token=token2) emb1 = pipe1.text_encoder.get_input_embeddings().weight pipe2 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe2 = pipe2.to(torch_device) pipe2.load_textual_inversion([ten1, ten2], token=[token1, token2]) emb2 = pipe2.text_encoder.get_input_embeddings().weight pipe3 = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe3 = pipe3.to(torch_device) pipe3.load_textual_inversion(torch.stack([ten1, ten2], dim=0), token=[token1, token2]) emb3 = pipe3.text_encoder.get_input_embeddings().weight assert len(pipe1.tokenizer) == len(pipe2.tokenizer) == len(pipe3.tokenizer) == num_tokens + 2 assert ( pipe1.tokenizer.convert_tokens_to_ids(token1) == pipe2.tokenizer.convert_tokens_to_ids(token1) == pipe3.tokenizer.convert_tokens_to_ids(token1) == num_tokens ) assert ( pipe1.tokenizer.convert_tokens_to_ids(token2) == pipe2.tokenizer.convert_tokens_to_ids(token2) == pipe3.tokenizer.convert_tokens_to_ids(token2) == num_tokens + 1 ) assert emb1[num_tokens].sum().item() == emb2[num_tokens].sum().item() == emb3[num_tokens].sum().item() assert ( emb1[num_tokens + 1].sum().item() == emb2[num_tokens + 1].sum().item() == emb3[num_tokens + 1].sum().item() ) def test_download_ignore_files(self): # Check https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files/blob/72f58636e5508a218c6b3f60550dc96445547817/model_index.json#L4 with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights tmpdirname = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files") all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a pytorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack assert not any(f in ["vae/diffusion_pytorch_model.bin", "text_encoder/config.json"] for f in files) assert len(files) == 14 def test_get_pipeline_class_from_flax(self): flax_config = {"_class_name": "FlaxStableDiffusionPipeline"} config = {"_class_name": "StableDiffusionPipeline"} # when loading a PyTorch Pipeline from a FlaxPipeline `model_index.json`, e.g.: https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-lms-pipe/blob/7a9063578b325779f0f1967874a6771caa973cad/model_index.json#L2 # we need to make sure that we don't load the Flax Pipeline class, but instead the PyTorch pipeline class assert _get_pipeline_class(DiffusionPipeline, flax_config) == _get_pipeline_class(DiffusionPipeline, config) class CustomPipelineTests(unittest.TestCase): def test_load_custom_pipeline(self): pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline" ) pipeline = pipeline.to(torch_device) # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24 assert pipeline.__class__.__name__ == "CustomPipeline" def test_load_custom_github(self): pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main" ) # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690 with torch.no_grad(): output = pipeline() assert output.numel() == output.sum() # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python # Could in the future work with hashes instead. del sys.modules["diffusers_modules.git.one_step_unet"] pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2" ) with torch.no_grad(): output = pipeline() assert output.numel() != output.sum() assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline" def test_run_custom_pipeline(self): pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline" ) pipeline = pipeline.to(torch_device) images, output_str = pipeline(num_inference_steps=2, output_type="np") assert images[0].shape == (1, 32, 32, 3) # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102 assert output_str == "This is a test" def test_remote_components(self): # make sure that trust remote code has to be passed with self.assertRaises(ValueError): pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-components") # Check that only loading custom componets "my_unet", "my_scheduler" works pipeline = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-sdxl-custom-components", trust_remote_code=True ) assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") assert pipeline.__class__.__name__ == "StableDiffusionXLPipeline" pipeline = pipeline.to(torch_device) images = pipeline("test", num_inference_steps=2, output_type="np")[0] assert images.shape == (1, 64, 64, 3) # Check that only loading custom componets "my_unet", "my_scheduler" and explicit custom pipeline works pipeline = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-sdxl-custom-components", custom_pipeline="my_pipeline", trust_remote_code=True ) assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") assert pipeline.__class__.__name__ == "MyPipeline" pipeline = pipeline.to(torch_device) images = pipeline("test", num_inference_steps=2, output_type="np")[0] assert images.shape == (1, 64, 64, 3) def test_remote_auto_custom_pipe(self): # make sure that trust remote code has to be passed with self.assertRaises(ValueError): pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-all") # Check that only loading custom componets "my_unet", "my_scheduler" and auto custom pipeline works pipeline = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-sdxl-custom-all", trust_remote_code=True ) assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") assert pipeline.__class__.__name__ == "MyPipeline" pipeline = pipeline.to(torch_device) images = pipeline("test", num_inference_steps=2, output_type="np")[0] assert images.shape == (1, 64, 64, 3) def test_local_custom_pipeline_repo(self): local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path ) pipeline = pipeline.to(torch_device) images, output_str = pipeline(num_inference_steps=2, output_type="np") assert pipeline.__class__.__name__ == "CustomLocalPipeline" assert images[0].shape == (1, 32, 32, 3) # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 assert output_str == "This is a local test" def test_local_custom_pipeline_file(self): local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py") pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path ) pipeline = pipeline.to(torch_device) images, output_str = pipeline(num_inference_steps=2, output_type="np") assert pipeline.__class__.__name__ == "CustomLocalPipeline" assert images[0].shape == (1, 32, 32, 3) # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 assert output_str == "This is a local test" def test_custom_model_and_pipeline(self): pipe = CustomPipeline( encoder=CustomEncoder(), scheduler=DDIMScheduler(), ) with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname, safe_serialization=False) pipe_new = CustomPipeline.from_pretrained(tmpdirname) pipe_new.save_pretrained(tmpdirname) conf_1 = dict(pipe.config) conf_2 = dict(pipe_new.config) del conf_2["_name_or_path"] assert conf_1 == conf_2 @slow @require_torch_gpu def test_download_from_git(self): # Because adaptive_avg_pool2d_backward_cuda # does not have a deterministic implementation. clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id) clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16) pipeline = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", custom_pipeline="clip_guided_stable_diffusion", clip_model=clip_model, feature_extractor=feature_extractor, torch_dtype=torch.float16, ) pipeline.enable_attention_slicing() pipeline = pipeline.to(torch_device) # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the pypi package of th e library, but solely on the community examples folder of GitHub under: # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion" image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0] assert image.shape == (512, 512, 3) def test_save_pipeline_change_config(self): pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = DiffusionPipeline.from_pretrained(tmpdirname) assert pipe.scheduler.__class__.__name__ == "PNDMScheduler" # let's make sure that changing the scheduler is correctly reflected with tempfile.TemporaryDirectory() as tmpdirname: pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.save_pretrained(tmpdirname) pipe = DiffusionPipeline.from_pretrained(tmpdirname) assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler" class PipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image def dummy_uncond_unet(self, sample_size=32): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=sample_size, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def dummy_cond_unet(self, sample_size=32): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=sample_size, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_extractor(self): def extract(*args, **kwargs): class Out: def __init__(self): self.pixel_values = torch.ones([0]) def to(self, device): self.pixel_values.to(device) return self return Out() return extract @parameterized.expand( [ [DDIMScheduler, DDIMPipeline, 32], [DDPMScheduler, DDPMPipeline, 32], [DDIMScheduler, DDIMPipeline, (32, 64)], [DDPMScheduler, DDPMPipeline, (64, 32)], ] ) def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32): unet = self.dummy_uncond_unet(sample_size) scheduler = scheduler_fn() pipeline = pipeline_fn(unet, scheduler).to(torch_device) generator = torch.manual_seed(0) out_image = pipeline( generator=generator, num_inference_steps=2, output_type="np", ).images sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size assert out_image.shape == (1, *sample_size, 3) def test_stable_diffusion_components(self): """Test that components property works correctly""" unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk inpaint = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ).to(torch_device) img2img = StableDiffusionImg2ImgPipeline(**inpaint.components, image_encoder=None).to(torch_device) text2img = StableDiffusionPipeline(**inpaint.components, image_encoder=None).to(torch_device) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) image_inpaint = inpaint( [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ).images image_img2img = img2img( [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images image_text2img = text2img( [prompt], generator=generator, num_inference_steps=2, output_type="np", ).images assert image_inpaint.shape == (1, 32, 32, 3) assert image_img2img.shape == (1, 32, 32, 3) assert image_text2img.shape == (1, 64, 64, 3) @require_torch_gpu def test_pipe_false_offload_warn(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") sd = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd.enable_model_cpu_offload() logger = logging.get_logger("diffusers.pipelines.pipeline_utils") with CaptureLogger(logger) as cap_logger: sd.to("cuda") assert "It is strongly recommended against doing so" in str(cap_logger) sd = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) def test_set_scheduler(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") sd = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, DDIMScheduler) sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, DDPMScheduler) sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, PNDMScheduler) sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, LMSDiscreteScheduler) sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, EulerDiscreteScheduler) sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler) sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config) assert isinstance(sd.scheduler, DPMSolverMultistepScheduler) def test_set_component_to_none(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") pipeline = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "This is a flower" out_image = pipeline( prompt=prompt, generator=generator, num_inference_steps=1, output_type="np", ).images pipeline.feature_extractor = None generator = torch.Generator(device="cpu").manual_seed(0) out_image_2 = pipeline( prompt=prompt, generator=generator, num_inference_steps=1, output_type="np", ).images assert out_image.shape == (1, 64, 64, 3) assert np.abs(out_image - out_image_2).max() < 1e-3 def test_optional_components_is_none(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") items = { "feature_extractor": self.dummy_extractor, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": bert, "tokenizer": tokenizer, "safety_checker": None, # we don't add an image encoder } pipeline = StableDiffusionPipeline(**items) assert sorted(pipeline.components.keys()) == sorted(["image_encoder"] + list(items.keys())) assert pipeline.image_encoder is None def test_set_scheduler_consistency(self): unet = self.dummy_cond_unet() pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") sd = StableDiffusionPipeline( unet=unet, scheduler=pndm, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) pndm_config = sd.scheduler.config sd.scheduler = DDPMScheduler.from_config(pndm_config) sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config) pndm_config_2 = sd.scheduler.config pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config} assert dict(pndm_config) == dict(pndm_config_2) sd = StableDiffusionPipeline( unet=unet, scheduler=ddim, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) ddim_config = sd.scheduler.config sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config) sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config) ddim_config_2 = sd.scheduler.config ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config} assert dict(ddim_config) == dict(ddim_config_2) def test_save_safe_serialization(self): pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") with tempfile.TemporaryDirectory() as tmpdirname: pipeline.save_pretrained(tmpdirname, safe_serialization=True) # Validate that the VAE safetensor exists and are of the correct format vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors") assert os.path.exists(vae_path), f"Could not find {vae_path}" _ = safetensors.torch.load_file(vae_path) # Validate that the UNet safetensor exists and are of the correct format unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors") assert os.path.exists(unet_path), f"Could not find {unet_path}" _ = safetensors.torch.load_file(unet_path) # Validate that the text encoder safetensor exists and are of the correct format text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors") assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}" _ = safetensors.torch.load_file(text_encoder_path) pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname) assert pipeline.unet is not None assert pipeline.vae is not None assert pipeline.text_encoder is not None assert pipeline.scheduler is not None assert pipeline.feature_extractor is not None def test_no_pytorch_download_when_doing_safetensors(self): # by default we don't download with tempfile.TemporaryDirectory() as tmpdirname: _ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname ) path = os.path.join( tmpdirname, "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all", "snapshots", "07838d72e12f9bcec1375b0482b80c1d399be843", "unet", ) # safetensors exists assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors")) # pytorch does not assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin")) def test_no_safetensors_download_when_doing_pytorch(self): use_safetensors = False with tempfile.TemporaryDirectory() as tmpdirname: _ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) path = os.path.join( tmpdirname, "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all", "snapshots", "07838d72e12f9bcec1375b0482b80c1d399be843", "unet", ) # safetensors does not exists assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors")) # pytorch does assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin")) def test_optional_components(self): unet = self.dummy_cond_unet() pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") orig_sd = StableDiffusionPipeline( unet=unet, scheduler=pndm, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=unet, feature_extractor=self.dummy_extractor, ) sd = orig_sd assert sd.config.requires_safety_checker is True with tempfile.TemporaryDirectory() as tmpdirname: sd.save_pretrained(tmpdirname) # Test that passing None works sd = StableDiffusionPipeline.from_pretrained( tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False ) assert sd.config.requires_safety_checker is False assert sd.config.safety_checker == (None, None) assert sd.config.feature_extractor == (None, None) with tempfile.TemporaryDirectory() as tmpdirname: sd.save_pretrained(tmpdirname) # Test that loading previous None works sd = StableDiffusionPipeline.from_pretrained(tmpdirname) assert sd.config.requires_safety_checker is False assert sd.config.safety_checker == (None, None) assert sd.config.feature_extractor == (None, None) orig_sd.save_pretrained(tmpdirname) # Test that loading without any directory works shutil.rmtree(os.path.join(tmpdirname, "safety_checker")) with open(os.path.join(tmpdirname, sd.config_name)) as f: config = json.load(f) config["safety_checker"] = [None, None] with open(os.path.join(tmpdirname, sd.config_name), "w") as f: json.dump(config, f) sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False) sd.save_pretrained(tmpdirname) sd = StableDiffusionPipeline.from_pretrained(tmpdirname) assert sd.config.requires_safety_checker is False assert sd.config.safety_checker == (None, None) assert sd.config.feature_extractor == (None, None) # Test that loading from deleted model index works with open(os.path.join(tmpdirname, sd.config_name)) as f: config = json.load(f) del config["safety_checker"] del config["feature_extractor"] with open(os.path.join(tmpdirname, sd.config_name), "w") as f: json.dump(config, f) sd = StableDiffusionPipeline.from_pretrained(tmpdirname) assert sd.config.requires_safety_checker is False assert sd.config.safety_checker == (None, None) assert sd.config.feature_extractor == (None, None) with tempfile.TemporaryDirectory() as tmpdirname: sd.save_pretrained(tmpdirname) # Test that partially loading works sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor) assert sd.config.requires_safety_checker is False assert sd.config.safety_checker == (None, None) assert sd.config.feature_extractor != (None, None) # Test that partially loading works sd = StableDiffusionPipeline.from_pretrained( tmpdirname, feature_extractor=self.dummy_extractor, safety_checker=unet, requires_safety_checker=[True, True], ) assert sd.config.requires_safety_checker == [True, True] assert sd.config.safety_checker != (None, None) assert sd.config.feature_extractor != (None, None) with tempfile.TemporaryDirectory() as tmpdirname: sd.save_pretrained(tmpdirname) sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor) assert sd.config.requires_safety_checker == [True, True] assert sd.config.safety_checker != (None, None) assert sd.config.feature_extractor != (None, None) def test_name_or_path(self): model_path = "hf-internal-testing/tiny-stable-diffusion-torch" sd = DiffusionPipeline.from_pretrained(model_path) assert sd.name_or_path == model_path with tempfile.TemporaryDirectory() as tmpdirname: sd.save_pretrained(tmpdirname) sd = DiffusionPipeline.from_pretrained(tmpdirname) assert sd.name_or_path == tmpdirname def test_error_no_variant_available(self): variant = "fp16" with self.assertRaises(ValueError) as error_context: _ = StableDiffusionPipeline.download( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", variant=variant ) assert "but no such modeling files are available" in str(error_context.exception) assert variant in str(error_context.exception) def test_pipe_to(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") sd = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) device_type = torch.device(torch_device).type sd1 = sd.to(device_type) sd2 = sd.to(torch.device(device_type)) sd3 = sd.to(device_type, torch.float32) sd4 = sd.to(device=device_type) sd5 = sd.to(torch_device=device_type) sd6 = sd.to(device_type, dtype=torch.float32) sd7 = sd.to(device_type, torch_dtype=torch.float32) assert sd1.device.type == device_type assert sd2.device.type == device_type assert sd3.device.type == device_type assert sd4.device.type == device_type assert sd5.device.type == device_type assert sd6.device.type == device_type assert sd7.device.type == device_type sd1 = sd.to(torch.float16) sd2 = sd.to(None, torch.float16) sd3 = sd.to(dtype=torch.float16) sd4 = sd.to(torch_dtype=torch.float16) sd5 = sd.to(None, dtype=torch.float16) sd6 = sd.to(None, torch_dtype=torch.float16) assert sd1.dtype == torch.float16 assert sd2.dtype == torch.float16 assert sd3.dtype == torch.float16 assert sd4.dtype == torch.float16 assert sd5.dtype == torch.float16 assert sd6.dtype == torch.float16 sd1 = sd.to(device=device_type, dtype=torch.float16) sd2 = sd.to(torch_device=device_type, torch_dtype=torch.float16) sd3 = sd.to(device_type, torch.float16) assert sd1.dtype == torch.float16 assert sd2.dtype == torch.float16 assert sd3.dtype == torch.float16 assert sd1.device.type == device_type assert sd2.device.type == device_type assert sd3.device.type == device_type def test_pipe_same_device_id_offload(self): unet = self.dummy_cond_unet() scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") sd = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd.enable_model_cpu_offload(gpu_id=5) assert sd._offload_gpu_id == 5 sd.maybe_free_model_hooks() assert sd._offload_gpu_id == 5 @slow @require_torch_gpu class PipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_smart_download(self): model_id = "hf-internal-testing/unet-pipeline-dummy" with tempfile.TemporaryDirectory() as tmpdirname: _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True) local_repo_name = "--".join(["models"] + model_id.split("/")) snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots") snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0]) # inspect all downloaded files to make sure that everything is included assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name)) assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME)) assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME)) assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME)) assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME)) assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME)) assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME)) # let's make sure the super large numpy file: # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy # is not downloaded, but all the expected ones assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy")) def test_warning_unused_kwargs(self): model_id = "hf-internal-testing/unet-pipeline-dummy" logger = logging.get_logger("diffusers.pipelines") with tempfile.TemporaryDirectory() as tmpdirname: with CaptureLogger(logger) as cap_logger: DiffusionPipeline.from_pretrained( model_id, not_used=True, cache_dir=tmpdirname, force_download=True, ) assert ( cap_logger.out.strip().split("\n")[-1] == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored." ) def test_from_save_pretrained(self): # 1. Load models model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) scheduler = DDPMScheduler(num_train_timesteps=10) ddpm = DDPMPipeline(model, scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdirname: ddpm.save_pretrained(tmpdirname) new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) new_ddpm.to(torch_device) generator = torch.Generator(device=torch_device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(0) new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" @require_python39_or_higher @require_torch_2 def test_from_save_pretrained_dynamo(self): run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None) def test_from_pretrained_hub(self): model_path = "google/ddpm-cifar10-32" scheduler = DDPMScheduler(num_train_timesteps=10) ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler) ddpm = ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler) ddpm_from_hub = ddpm_from_hub.to(torch_device) ddpm_from_hub.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(0) new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" def test_from_pretrained_hub_pass_model(self): model_path = "google/ddpm-cifar10-32" scheduler = DDPMScheduler(num_train_timesteps=10) # pass unet into DiffusionPipeline unet = UNet2DModel.from_pretrained(model_path) ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler) ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device) ddpm_from_hub_custom_model.set_progress_bar_config(disable=None) ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler) ddpm_from_hub = ddpm_from_hub.to(torch_device) ddpm_from_hub_custom_model.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(0) new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" def test_output_format(self): model_path = "google/ddpm-cifar10-32" scheduler = DDIMScheduler.from_pretrained(model_path) pipe = DDIMPipeline.from_pretrained(model_path, scheduler=scheduler) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) images = pipe(output_type="numpy").images assert images.shape == (1, 32, 32, 3) assert isinstance(images, np.ndarray) images = pipe(output_type="pil", num_inference_steps=4).images assert isinstance(images, list) assert len(images) == 1 assert isinstance(images[0], PIL.Image.Image) # use PIL by default images = pipe(num_inference_steps=4).images assert isinstance(images, list) assert isinstance(images[0], PIL.Image.Image) @require_flax def test_from_flax_from_pt(self): pipe_pt = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None ) pipe_pt.to(torch_device) from diffusers import FlaxStableDiffusionPipeline with tempfile.TemporaryDirectory() as tmpdirname: pipe_pt.save_pretrained(tmpdirname) pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained( tmpdirname, safety_checker=None, from_pt=True ) with tempfile.TemporaryDirectory() as tmpdirname: pipe_flax.save_pretrained(tmpdirname, params=params) pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True) pipe_pt_2.to(torch_device) prompt = "Hello" generator = torch.manual_seed(0) image_0 = pipe_pt( [prompt], generator=generator, num_inference_steps=2, output_type="np", ).images[0] generator = torch.manual_seed(0) image_1 = pipe_pt_2( [prompt], generator=generator, num_inference_steps=2, output_type="np", ).images[0] assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass" @require_compel def test_weighted_prompts_compel(self): from compel import Compel pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.enable_attention_slicing() compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) prompt = "a red cat playing with a ball{}" prompts = [prompt.format(s) for s in ["", "++", "--"]] prompt_embeds = compel(prompts) generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])] images = pipe( prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy" ).images for i, image in enumerate(images): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"/compel/forest_{i}.npy" ) assert np.abs(image - expected_image).max() < 3e-1 @nightly @require_torch_gpu class PipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_ddpm_ddim_equality_batched(self): seed = 0 model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) ddpm_scheduler = DDPMScheduler() ddim_scheduler = DDIMScheduler() ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler) ddim.to(torch_device) ddim.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(seed) ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images generator = torch.Generator(device=torch_device).manual_seed(seed) ddim_images = ddim( batch_size=2, generator=generator, num_inference_steps=1000, eta=1.0, output_type="numpy", use_clipped_model_output=True, # Need this to make DDIM match DDPM ).images # the values aren't exactly equal, but the images look the same visually assert np.abs(ddpm_images - ddim_images).max() < 1e-1
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipeline_utils.py
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class IsSafetensorsCompatibleTests(unittest.TestCase): def test_all_is_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_compatible(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(filenames)) def test_transformer_model_is_compatible(self): filenames = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_transformer_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_all_is_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_compatible_variant(self): filenames = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_compatible_variant_partial(self): # pass variant but use the non-variant filenames filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_diffusers_model_is_not_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] variant = "fp16" self.assertFalse(is_safetensors_compatible(filenames, variant=variant)) def test_transformer_model_is_compatible_variant(self): filenames = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_transformer_model_is_compatible_variant_partial(self): # pass variant but use the non-variant filenames filenames = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] variant = "fp16" self.assertTrue(is_safetensors_compatible(filenames, variant=variant)) def test_transformer_model_is_not_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] variant = "fp16" self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_onnx_common.py
from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class OnnxPipelineTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline, equivalence of dict and tuple outputs, etc. """ pass
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/pipelines/test_pipelines_flax.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class DownloadTests(unittest.TestCase): def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _ = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin") for f in files) @slow @require_flax class FlaxPipelineTests(unittest.TestCase): def test_dummy_all_tpus(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 4 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3 assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1 images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(images_pil) == num_samples def test_stable_diffusion_v1_4(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_with_safety(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16 ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_ddim(self): scheduler = FlaxDDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1, ) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, ) scheduler_state = scheduler.create_state() params["scheduler"] = scheduler_state prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1 def test_jax_memory_efficient_attention(self): prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) num_samples = jax.device_count() prompt = num_samples * [prompt] prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) slice = images[2, 0, 256, 10:17, 1] # With memory efficient attention pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) slice_eff = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/pndm/test_pndm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device enable_full_determinism() class PNDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def test_inference(self): unet = self.dummy_uncond_unet scheduler = PNDMScheduler() pndm = PNDMPipeline(unet=unet, scheduler=scheduler) pndm.to(torch_device) pndm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images generator = torch.manual_seed(0) image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch class PNDMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = PNDMScheduler() pndm = PNDMPipeline(unet=unet, scheduler=scheduler) pndm.to(torch_device) pndm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = pndm(generator=generator, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, load_numpy, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet.to(memory_format=torch.channels_last) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_controlnet_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass class StableDiffusionMultiControlNetOneModelPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_depth(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" ) assert np.abs(expected_image - image).max() < 8e-1 def test_hed(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "oil painting of handsome old man, masterpiece" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_mlsd(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "room" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_normal(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "cute toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_openpose(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Chef in the kitchen" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_scribble(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "bag" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (640, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_seg(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) _ = pipe( prompt, image, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 4 * 10**9 def test_canny_guess_mode(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_canny_guess_mode_euler(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None) def test_v11_shuffle_global_pool_conditions(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "New York" image = load_image( "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=7.0, ) image = output.images[0] assert image.shape == (512, 640, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3 @slow @require_torch_gpu class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pose_and_canny(self): controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny] ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird and Chef" image_canny = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) image_pose = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.7330834, 0.590667, 0.5667336, 0.6029023, 0.5679491, 0.5968194, 0.4032986, 0.47612396, 0.5089609] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 def test_controlnet_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7799, 0.614, 0.6162, 0.7082, 0.6662, 0.5833, 0.4148, 0.5182, 0.4866]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionXLMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): return self._test_save_load_optional_components() class StableDiffusionXLMultiControlNetOneModelPipelineFastTests( PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() def test_negative_conditions(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slice_without_neg_cond = image[0, -3:, -3:, -1] image = pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_without_neg_cond - image_slice_with_neg_cond).max() > 1e-2) @slow @require_torch_gpu class ControlNetSDXLPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4185, 0.4127, 0.4089, 0.4046, 0.4115, 0.4096, 0.4081, 0.4112, 0.3913]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (512, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4399, 0.5112, 0.5478, 0.4314, 0.472, 0.4823, 0.4647, 0.4957, 0.4853]) assert np.allclose(original_image, expected_image, atol=1e-04)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily based on: import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components class MultiControlNetInpaintPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) prompt = "pitch black hole" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe( prompt, image=image, mask_image=mask_image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=3, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_inpaint(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(33) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" ) init_image = init_image.resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" ) mask_image = mask_image.resize((512, 512)) prompt = "a handsome man with ray-ban sunglasses" def make_inpaint_condition(image, image_mask): image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size" image[image_mask > 0.5] = -1.0 # set as masked pixel image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) image = torch.from_numpy(image) return image control_image = make_inpaint_condition(init_image, mask_image) output = pipe( prompt, image=init_image, mask_image=mask_image, control_image=control_image, guidance_scale=9.0, eta=1.0, generator=generator, num_inference_steps=20, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy" ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetInpaintPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, mask_image=mask_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from transformers.models.clip.configuration_clip import CLIPTextConfig from diffusers import ( AutoencoderKL, BlipDiffusionControlNetPipeline, ControlNetModel, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class BlipDiffusionControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = BlipDiffusionControlNetPipeline params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", "condtioning_image", ] batch_params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", "condtioning_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "neg_prompt", "guidance_scale", "prompt_strength", "prompt_reps", ] def get_dummy_components(self): torch.manual_seed(0) text_encoder_config = CLIPTextConfig( vocab_size=1000, hidden_size=16, intermediate_size=16, projection_dim=16, num_hidden_layers=1, num_attention_heads=1, max_position_embeddings=77, ) text_encoder = ContextCLIPTextModel(text_encoder_config) vae = AutoencoderKL( in_channels=4, out_channels=4, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(32,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=16, sample_size=16, ) blip_vision_config = { "hidden_size": 16, "intermediate_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "image_size": 224, "patch_size": 14, "hidden_act": "quick_gelu", } blip_qformer_config = { "vocab_size": 1000, "hidden_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "intermediate_size": 16, "max_position_embeddings": 512, "cross_attention_frequency": 1, "encoder_hidden_size": 16, } qformer_config = Blip2Config( vision_config=blip_vision_config, qformer_config=blip_qformer_config, num_query_tokens=16, tokenizer="hf-internal-testing/tiny-random-bert", ) qformer = Blip2QFormerModel(qformer_config) unet = UNet2DConditionModel( block_out_channels=(4, 16), layers_per_block=1, norm_num_groups=4, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=16, ) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) controlnet = ControlNetModel( block_out_channels=(4, 16), layers_per_block=1, in_channels=4, norm_num_groups=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=16, conditioning_embedding_out_channels=(8, 16), ) vae.eval() qformer.eval() text_encoder.eval() image_processor = BlipImageProcessor() components = { "text_encoder": text_encoder, "vae": vae, "qformer": qformer, "unet": unet, "tokenizer": tokenizer, "scheduler": scheduler, "controlnet": controlnet, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): np.random.seed(seed) reference_image = np.random.rand(32, 32, 3) * 255 reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA") cond_image = np.random.rand(32, 32, 3) * 255 cond_image = Image.fromarray(cond_image.astype("uint8")).convert("RGBA") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "swimming underwater", "generator": generator, "reference_image": reference_image, "condtioning_image": cond_image, "source_subject_category": "dog", "target_subject_category": "dog", "height": 32, "width": 32, "guidance_scale": 7.5, "num_inference_steps": 2, "output_type": "np", } return inputs def test_blipdiffusion_controlnet(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) image = pipe(**self.get_dummy_inputs(device))[0] image_slice = image[0, -3:, -3:, 0] assert image.shape == (1, 16, 16, 4) expected_slice = np.array([0.7953, 0.7136, 0.6597, 0.4779, 0.7389, 0.4111, 0.5826, 0.4150, 0.8422]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_flax_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxControlNetPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_canny(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "bird" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) canny_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 def test_pose(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "Chef in the kitchen" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) pose_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) processed_image = pipe.prepare_image_inputs([pose_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
# coding=utf-8 # Copyright 2023 Harutatsu Akiyama, Jinbin Bai, and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetInpaintPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"})) image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0, img_res=64): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) controlnet_embedder_scale_factor = 2 control_image = ( floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ) .to(device) .cpu() ) control_image = control_image.cpu().permute(0, 2, 3, 1)[0] # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image control_image = 255 * control_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res)) control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": init_image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.5381963, 0.4836803, 0.45821992, 0.5577731, 0.51210403, 0.4794795, 0.59282357, 0.5647199, 0.43100584] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLImg2ImgFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, skip_first_text_encoder=False): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0): controlnet_embedder_scale_factor = 2 image = floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": image, } return inputs def test_stable_diffusion_xl_controlnet_img2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_controlnet_img2img_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] assert output.images.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetImg2ImgPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm/test_audioldm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = ClapTextModelWithProjection(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def test_audioldm_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = audioldm_pipe.text_encoder( text_inputs, ) prompt_embeds = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state prompt_embeds = F.normalize(prompt_embeds, dim=-1) inputs["prompt_embeds"] = prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) text_embeds = audioldm_pipe.text_encoder( text_inputs, ) text_embeds = text_embeds.text_embeds # additional L_2 normalization over each hidden-state text_embeds = F.normalize(text_embeds, dim=-1) embeds.append(text_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def test_audioldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) @nightly class AudioLDMPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm(self): audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81920 audio_slice = audio[77230:77240] expected_slice = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-2 @nightly class AudioLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm_lms(self): audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81920 audio_slice = audio[27780:27790] expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 3e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ddim/test_ddim.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DDIMPipeline params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "latents", "callback", "callback_steps", } batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) scheduler = DDIMScheduler() components = {"unet": unet, "scheduler": scheduler} return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 32, 3)) expected_slice = np.array( [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class DDIMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = DDIMScheduler() ddim = DDIMPipeline(unet=unet, scheduler=scheduler) ddim.to(torch_device) ddim.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddim(generator=generator, eta=0.0, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_ema_bedroom(self): model_id = "google/ddpm-ema-bedroom-256" unet = UNet2DModel.from_pretrained(model_id) scheduler = DDIMScheduler.from_pretrained(model_id) ddpm = DDIMPipeline(unet=unet, scheduler=scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddpm(generator=generator, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audio_diffusion/test_audio_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNet2DConditionModel, UNet2DModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device enable_full_determinism() class PipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_unet(self): torch.manual_seed(0) model = UNet2DModel( sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), ) return model @property def dummy_unet_condition(self): torch.manual_seed(0) model = UNet2DConditionModel( sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, ) return model @property def dummy_vqvae_and_unet(self): torch.manual_seed(0) vqvae = AutoencoderKL( sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), ) unet = UNet2DModel( sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), ) return vqvae, unet @nightly def test_audio_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator mel = Mel( x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], ) scheduler = DDPMScheduler() pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator, steps=4) audio = output.audios[0] image = output.images[0] generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator, steps=4, return_dict=False) image_from_tuple = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10] expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0 mel = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], ) scheduler = DDIMScheduler() dummy_vqvae_and_unet = self.dummy_vqvae_and_unet pipe = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) np.random.seed(0) raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,)) generator = torch.Generator(device=device).manual_seed(42) output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10) image = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 dummy_unet_condition = self.dummy_unet_condition pipe = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) np.random.seed(0) encoding = torch.rand((1, 1, 10)) output = pipe(generator=generator, encoding=encoding) image = output.images[0] image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0 @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_audio_diffusion(self): device = torch_device pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(42) output = pipe(generator=generator) audio = output.audios[0] image = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26]) assert np.abs(image_slice.flatten() - expected_slice).max() == 0
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, nightly, require_torch_gpu, torch_device, ) enable_full_determinism() class SafeDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_extractor(self): def extract(*args, **kwargs): class Out: def __init__(self): self.pixel_values = torch.ones([0]) def to(self, device): self.pixel_values.to(device) return self return Out() return extract def test_semantic_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6114, 0.5001, 0.5034, 0.5470, 0.4729, 0.4971, 0.4867, 0.4867]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_semantic_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5122, 0.5712, 0.4825, 0.5053, 0.5646, 0.4769, 0.5179, 0.4894, 0.4994]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_semantic_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None ) assert isinstance(pipe, StableDiffusionPipeline) assert isinstance(pipe.scheduler, LMSDiscreteScheduler) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_semantic_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # put models in fp16 unet = unet.half() vae = vae.half() bert = bert.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_positive_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photo of a cat" edit = { "editing_prompt": ["sunglasses"], "reverse_editing_direction": [False], "edit_warmup_steps": 10, "edit_guidance_scale": 6, "edit_threshold": 0.95, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 3 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.34673113, 0.38492733, 0.37597352, 0.34086335, 0.35650748, 0.35579205, 0.3384763, 0.34340236, 0.3573271, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.41887826, 0.37728766, 0.30138272, 0.41416335, 0.41664985, 0.36283392, 0.36191246, 0.43364465, 0.43001732, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_negative_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "an image of a crowded boulevard, realistic, 4k" edit = { "editing_prompt": "crowd, crowded, people", "reverse_editing_direction": True, "edit_warmup_steps": 10, "edit_guidance_scale": 8.3, "edit_threshold": 0.9, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 9 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.43497998, 0.91814065, 0.7540739, 0.55580205, 0.8467265, 0.5389691, 0.62574506, 0.58897763, 0.50926757, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.3089719, 0.30500144, 0.29016042, 0.30630964, 0.325687, 0.29419225, 0.2908091, 0.28723598, 0.27696294, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_multi_cond_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a castle next to a river" edit = { "editing_prompt": ["boat on a river, boat", "monet, impression, sunrise"], "reverse_editing_direction": False, "edit_warmup_steps": [15, 18], "edit_guidance_scale": 6, "edit_threshold": [0.9, 0.8], "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 48 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.75163555, 0.76037145, 0.61785, 0.9189673, 0.8627701, 0.85189694, 0.8512813, 0.87012076, 0.8312857, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.73553365, 0.7537271, 0.74341905, 0.66480356, 0.6472925, 0.63039416, 0.64812905, 0.6749717, 0.6517102, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_guidance_fp16(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photo of a cat" edit = { "editing_prompt": ["sunglasses"], "reverse_editing_direction": [False], "edit_warmup_steps": 10, "edit_guidance_scale": 6, "edit_threshold": 0.95, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 3 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.34887695, 0.3876953, 0.375, 0.34423828, 0.3581543, 0.35717773, 0.3383789, 0.34570312, 0.359375, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.42285156, 0.36914062, 0.29077148, 0.42041016, 0.41918945, 0.35498047, 0.3618164, 0.4423828, 0.43115234, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ddpm/test_ddpm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device enable_full_determinism() class DDPMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def test_fast_inference(self): device = "cpu" unet = self.dummy_uncond_unet scheduler = DDPMScheduler() ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(device) ddpm.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array( [9.956e-01, 5.785e-01, 4.675e-01, 9.930e-01, 0.0, 1.000, 1.199e-03, 2.648e-04, 5.101e-04] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_predict_sample(self): unet = self.dummy_uncond_unet scheduler = DDPMScheduler(prediction_type="sample") ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images generator = torch.manual_seed(0) image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="numpy")[0] image_slice = image[0, -3:, -3:, -1] image_eps_slice = image_eps[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) tolerance = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance @slow @require_torch_gpu class DDPMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = DDPMScheduler.from_pretrained(model_id) ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddpm(generator=generator, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/pixart/test_pixart.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, DDIMScheduler, PixArtAlphaPipeline, Transformer2DModel, ) from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PixArtAlphaPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params def get_dummy_components(self): torch.manual_seed(0) transformer = Transformer2DModel( sample_size=8, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, ) vae = AutoencoderKL() scheduler = DDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "use_resolution_binning": False, "output_type": "np", } return inputs def test_sequential_cpu_offload_forward_pass(self): # TODO(PVP, Sayak) need to fix later return def test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 8, 8, 3)) expected_slice = np.array([0.5303, 0.2658, 0.7979, 0.1182, 0.3304, 0.4608, 0.5195, 0.4261, 0.4675]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_non_square_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs, height=32, width=48).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 48, 3)) expected_slice = np.array([0.3859, 0.2987, 0.2333, 0.5243, 0.6721, 0.4436, 0.5292, 0.5373, 0.4416]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_with_embeddings_and_multiple_images(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference_with_multiple_images_per_prompt(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_images_per_prompt"] = 2 image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (2, 8, 8, 3)) expected_slice = np.array([0.5303, 0.2658, 0.7979, 0.1182, 0.3304, 0.4608, 0.5195, 0.4261, 0.4675]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_raises_warning_for_mask_feature(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs.update({"mask_feature": True}) with self.assertWarns(FutureWarning) as warning_ctx: _ = pipe(**inputs).images assert "mask_feature" in str(warning_ctx.warning) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @slow @require_torch_gpu class PixArtAlphaPipelineIntegrationTests(unittest.TestCase): ckpt_id_1024 = "PixArt-alpha/PixArt-XL-2-1024-MS" ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512" prompt = "A small cactus with a happy face in the Sahara desert." def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pixart_1024_fast(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_512_fast(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_1024(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1941, 0.2117, 0.2188, 0.1946, 0.218, 0.2124, 0.199, 0.2437, 0.2583]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_512(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.2637, 0.291, 0.2939, 0.207, 0.2512, 0.2783, 0.2168, 0.2324, 0.2817]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_1024_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 1024, 768 num_inference_steps = 10 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) def test_pixart_512_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 512, 768 num_inference_steps = 10 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e_img2img.py
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils.testing_utils import ( floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ShapEImg2ImgPipeline params = ["image"] batch_params = ["image"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 16 @property def time_input_dim(self): return 16 @property def time_embed_dim(self): return self.time_input_dim * 4 @property def renderer_dim(self): return 8 @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=32, projection_dim=self.text_embedder_hidden_size, intermediate_size=24, num_attention_heads=2, num_channels=3, num_hidden_layers=5, patch_size=1, ) model = CLIPVisionModel(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } model = PriorTransformer(**model_kwargs) return model @property def dummy_renderer(self): torch.manual_seed(0) model_kwargs = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } model = ShapERenderer(**model_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder image_processor = self.dummy_image_processor shap_e_renderer = self.dummy_renderer scheduler = HeunDiscreteScheduler( beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, ) components = { "prior": prior, "image_encoder": image_encoder, "image_processor": image_processor, "shap_e_renderer": shap_e_renderer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": input_image, "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "latent", } return inputs def test_shap_e(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] image_slice = image[-3:, -3:].cpu().numpy() assert image.shape == (32, 16) expected_slice = np.array( [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( batch_size=2, expected_max_diff=6e-3, ) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_size = 1 num_images_per_prompt = 2 inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) @unittest.skip("Key error is raised with accelerate") def test_sequential_cpu_offload_forward_pass(self): pass @nightly @require_torch_gpu class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_shap_e_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_img2img_out.npy" ) pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) images = pipe( input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(images, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e.py
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ShapEPipeline params = ["prompt"] batch_params = ["prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 16 @property def time_input_dim(self): return 16 @property def time_embed_dim(self): return self.time_input_dim * 4 @property def renderer_dim(self): return 8 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } model = PriorTransformer(**model_kwargs) return model @property def dummy_renderer(self): torch.manual_seed(0) model_kwargs = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } model = ShapERenderer(**model_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer shap_e_renderer = self.dummy_renderer scheduler = HeunDiscreteScheduler( beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, ) components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "shap_e_renderer": shap_e_renderer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "latent", } return inputs def test_shap_e(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] image = image.cpu().numpy() image_slice = image[-3:, -3:] assert image.shape == (32, 16) expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=6e-3) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_size = 1 num_images_per_prompt = 2 inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) @unittest.skip("Key error is raised with accelerate") def test_sequential_cpu_offload_forward_pass(self): pass @nightly @require_torch_gpu class ShapEPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_shap_e(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) pipe = ShapEPipeline.from_pretrained("openai/shap-e") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) images = pipe( "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(images, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/score_sde_ve/test_score_sde_ve.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device enable_full_determinism() class ScoreSdeVeipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def test_inference(self): unet = self.dummy_uncond_unet scheduler = ScoreSdeVeScheduler() sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler) sde_ve.to(torch_device) sde_ve.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images generator = torch.manual_seed(0) image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[ 0 ] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch class ScoreSdeVePipelineIntegrationTests(unittest.TestCase): def test_inference(self): model_id = "google/ncsnpp-church-256" model = UNet2DModel.from_pretrained(model_id) scheduler = ScoreSdeVeScheduler.from_pretrained(model_id) sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler) sde_ve.to(torch_device) sde_ve.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from diffusers import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class IPAdapterNightlyTestsMixin(unittest.TestCase): dtype = torch.float16 def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_image_encoder(self, repo_id, subfolder): image_encoder = CLIPVisionModelWithProjection.from_pretrained( repo_id, subfolder=subfolder, torch_dtype=self.dtype ).to(torch_device) return image_encoder def get_image_processor(self, repo_id): image_processor = CLIPImageProcessor.from_pretrained(repo_id) return image_processor def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False): image = load_image( "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" ) if for_sdxl: image = image.resize((1024, 1024)) input_kwargs = { "prompt": "best quality, high quality", "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", "num_inference_steps": 5, "generator": torch.Generator(device="cpu").manual_seed(33), "ip_adapter_image": image, "output_type": "np", } if for_image_to_image: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") if for_sdxl: image = image.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) elif for_inpainting: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") if for_sdxl: image = image.resize((1024, 1024)) mask = mask.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) return input_kwargs @slow @require_torch_gpu class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.8047, 0.8774, 0.9248, 0.9155, 0.9814, 1.0, 0.9678, 1.0, 1.0]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_image_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.2307, 0.2341, 0.2305, 0.24, 0.2268, 0.25, 0.2322, 0.2588, 0.2935]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_inpainting(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.2705, 0.2395, 0.2209, 0.2312, 0.2102, 0.2104, 0.2178, 0.2065, 0.1997]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) @slow @require_torch_gpu class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0968, 0.0959, 0.0852, 0.0912, 0.0948, 0.093, 0.0893, 0.0932, 0.0923]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_image_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0653, 0.0704, 0.0725, 0.0741, 0.0702, 0.0647, 0.0782, 0.0799, 0.0752]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_inpainting_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1418, 0.1493, 0.1428, 0.146, 0.1491, 0.1501, 0.1473, 0.1501, 0.1516]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/vq_diffusion/test_vq_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device torch.backends.cuda.matmul.allow_tf32 = False class VQDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def num_embed(self): return 12 @property def num_embeds_ada_norm(self): return 12 @property def text_embedder_hidden_size(self): return 32 @property def dummy_vqvae(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_transformer(self): torch.manual_seed(0) height = 12 width = 12 model_kwargs = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } model = Transformer2DModel(**model_kwargs) return model def test_vq_diffusion(self): device = "cpu" vqvae = self.dummy_vqvae text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer transformer = self.dummy_transformer scheduler = VQDiffusionScheduler(self.num_embed) learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False) pipe = VQDiffusionPipeline( vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) prompt = "teddy bear playing in the pool" generator = torch.Generator(device=device).manual_seed(0) output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = pipe( [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2 )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_vq_diffusion_classifier_free_sampling(self): device = "cpu" vqvae = self.dummy_vqvae text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer transformer = self.dummy_transformer scheduler = VQDiffusionScheduler(self.num_embed) learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings( learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) pipe = VQDiffusionPipeline( vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, ) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) prompt = "teddy bear playing in the pool" generator = torch.Generator(device=device).manual_seed(0) output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = pipe( [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2 )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class VQDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_vq_diffusion_classifier_free_sampling(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq") pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though generator = torch.Generator(device=torch_device).manual_seed(0) output = pipeline( "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DanceDiffusionPipeline params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) scheduler = IPNDMScheduler() components = { "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dance_diffusion(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 def test_dance_diffusion_fp16(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unclip/test_unclip.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = UnCLIPPipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", "guidance_scale", "prompt_embeds", "cross_attention_kwargs", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = [ "generator", "return_dict", "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) return model @property def dummy_text_proj(self): torch.manual_seed(0) model_kwargs = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } model = UnCLIPTextProjModel(**model_kwargs) return model @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_super_res_kwargs(self): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "in_channels": 6, "out_channels": 3, } @property def dummy_super_res_first(self): torch.manual_seed(0) model = UNet2DModel(**self.dummy_super_res_kwargs) return model @property def dummy_super_res_last(self): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) model = UNet2DModel(**self.dummy_super_res_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior decoder = self.dummy_decoder text_proj = self.dummy_text_proj text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer super_res_first = self.dummy_super_res_first super_res_last = self.dummy_super_res_last prior_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample_range=5.0, ) decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) components = { "prior": prior, "decoder": decoder, "text_proj": text_proj, "text_encoder": text_encoder, "tokenizer": tokenizer, "super_res_first": super_res_first, "super_res_last": super_res_last, "prior_scheduler": prior_scheduler, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_num_inference_steps": 2, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "numpy", } return inputs def test_unclip(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.9988, 0.0028, 0.9997, 0.9984, 0.9965, 0.0029, 0.9986, 0.0025, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_passed_text_embed(self): device = torch.device("cpu") class DummyScheduler: init_noise_sigma = 1 components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) prior = components["prior"] decoder = components["decoder"] super_res_first = components["super_res_first"] tokenizer = components["tokenizer"] text_encoder = components["text_encoder"] generator = torch.Generator(device=device).manual_seed(0) dtype = prior.dtype batch_size = 1 shape = (batch_size, prior.config.embedding_dim) prior_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = (batch_size, decoder.config.in_channels, decoder.config.sample_size, decoder.config.sample_size) decoder_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = ( batch_size, super_res_first.config.in_channels // 2, super_res_first.config.sample_size, super_res_first.config.sample_size, ) super_res_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) pipe.set_progress_bar_config(disable=None) prompt = "this is a prompt example" generator = torch.Generator(device=device).manual_seed(0) output = pipe( [prompt], generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, output_type="np", ) image = output.images text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ) text_model_output = text_encoder(text_inputs.input_ids) text_attention_mask = text_inputs.attention_mask generator = torch.Generator(device=device).manual_seed(0) image_from_text = pipe( generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, text_model_output=text_model_output, text_attention_mask=text_attention_mask, output_type="np", )[0] # make sure passing text embeddings manually is identical assert np.abs(image - image_from_text).max() < 1e-4 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference, expected_max_diff=0.01) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. @skip_mps def test_inference_batch_single_identical(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3 ) def test_inference_batch_consistent(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes batch_sizes = [2, 3] self._test_inference_batch_consistent( batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ) @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_local(self): return super().test_save_load_local(expected_max_difference=5e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @unittest.skip("UnCLIP produces very large differences in fp16 vs fp32. Test is not useful.") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1.0) @nightly class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo_cpu_fp32(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_cpu.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha") pipeline.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipeline( "horse", num_images_per_prompt=1, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 1e-1 @nightly @require_torch_gpu class UnCLIPPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_fp16.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( "horse", generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(image, expected_image) def test_unclip_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( "horse", num_images_per_prompt=1, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unclip/test_unclip_image_variation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = UnCLIPImageVariationPipeline params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"} batch_params = IMAGE_VARIATION_BATCH_PARAMS required_optional_params = [ "generator", "return_dict", "decoder_num_inference_steps", "super_res_num_inference_steps", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) return CLIPVisionModelWithProjection(config) @property def dummy_text_proj(self): torch.manual_seed(0) model_kwargs = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } model = UnCLIPTextProjModel(**model_kwargs) return model @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_super_res_kwargs(self): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "in_channels": 6, "out_channels": 3, } @property def dummy_super_res_first(self): torch.manual_seed(0) model = UNet2DModel(**self.dummy_super_res_kwargs) return model @property def dummy_super_res_last(self): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) model = UNet2DModel(**self.dummy_super_res_kwargs) return model def get_dummy_components(self): decoder = self.dummy_decoder text_proj = self.dummy_text_proj text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer super_res_first = self.dummy_super_res_first super_res_last = self.dummy_super_res_last decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) image_encoder = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def get_dummy_inputs(self, device, seed=0, pil_image=True): input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) if pil_image: input_image = input_image * 0.5 + 0.5 input_image = input_image.clamp(0, 1) input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy() input_image = DiffusionPipeline.numpy_to_pil(input_image)[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def test_unclip_image_variation_input_tensor(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_image_variation_input_image(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_image_variation_input_list_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) pipeline_inputs["image"] = [ pipeline_inputs["image"], pipeline_inputs["image"], ] output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) tuple_pipeline_inputs["image"] = [ tuple_pipeline_inputs["image"], tuple_pipeline_inputs["image"], ] image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_passed_image_embed(self): device = torch.device("cpu") class DummyScheduler: init_noise_sigma = 1 components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(0) dtype = pipe.decoder.dtype batch_size = 1 shape = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) decoder_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) super_res_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) img_out_1 = pipe( **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents ).images pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) # Don't pass image, instead pass embedding image = pipeline_inputs.pop("image") image_embeddings = pipe.image_encoder(image).image_embeds img_out_2 = pipe( **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings, ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_1 - img_out_2).max() < 1e-4 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor expected_max_diff = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, expected_max_diff=expected_max_diff ) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. @unittest.skip("UnCLIP produces very large differences. Test is not useful.") @skip_mps def test_inference_batch_single_identical(self): additional_params_copy_to_batched_inputs = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3 ) def test_inference_batch_consistent(self): additional_params_copy_to_batched_inputs = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes batch_sizes = [2, 3] self._test_inference_batch_consistent( batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ) @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @unittest.skip("UnCLIP produces very large difference. Test is not useful.") @skip_mps def test_save_load_local(self): return super().test_save_load_local(expected_max_difference=4e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @unittest.skip("UnCLIP produces very large difference in fp16 vs fp32. Test is not useful.") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1.0) @nightly @require_torch_gpu class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_image_variation_karlo(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" ) pipeline = UnCLIPImageVariationPipeline.from_pretrained( "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( input_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(image, expected_image, 15)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/paint_by_example/test_paint_by_example.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionConfig from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PaintByExamplePipeline params = IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO_DO: update the image_prams once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, image_size=32, patch_size=4, ) image_encoder = PaintByExampleImageEncoder(config, proj_size=32) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "image_encoder": image_encoder, "safety_checker": None, "feature_extractor": feature_extractor, } return components def convert_to_pt(self, image): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image def get_dummy_inputs(self, device="cpu", seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) example_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "example_image": example_image, "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_paint_by_example_inpaint(self): components = self.get_dummy_components() # make sure here that pndm scheduler skips prk pipe = PaintByExamplePipeline(**components) pipe = pipe.to("cpu") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() output = pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4686, 0.5687, 0.4007, 0.5218, 0.5741, 0.4482, 0.4940, 0.4629, 0.4503]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_paint_by_example_image_tensor(self): device = "cpu" inputs = self.get_dummy_inputs() inputs.pop("mask_image") image = self.convert_to_pt(inputs.pop("image")) mask_image = image.clamp(0, 1) / 2 # make sure here that pndm scheduler skips prk pipe = PaintByExamplePipeline(**self.get_dummy_components()) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(image=image, mask_image=mask_image[:, 0], **inputs) out_1 = output.images image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = mask_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB") output = pipe(**self.get_dummy_inputs()) out_2 = output.images assert out_1.shape == (1, 64, 64, 3) assert np.abs(out_1.flatten() - out_2.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PaintByExamplePipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_paint_by_example(self): # make sure here that pndm scheduler skips prk init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/dog_in_bucket.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/mask.png" ) example_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/panda.jpg" ) pipe = PaintByExamplePipeline.from_pretrained("Fantasy-Studio/Paint-by-Example") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(321) output = pipe( image=init_image, mask_image=mask_image, example_image=example_image, generator=generator, guidance_scale=5.0, num_inference_steps=50, output_type="np", ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4834, 0.4811, 0.4874, 0.5122, 0.5081, 0.5144, 0.5291, 0.5290, 0.5374]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForText2Image, Kandinsky3Pipeline, Kandinsky3UNet, VQModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, require_torch_gpu, slow, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky3PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky3Pipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_xformers_attention = False @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = Kandinsky3UNet( in_channels=4, time_embedding_dim=4, groups=2, attention_head_dim=4, layers_per_block=3, block_out_channels=(32, 64), cross_attention_dim=4, encoder_hid_dim=32, ) scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="squaredcos_cap_v2", clip_sample=True, thresholding=False, ) torch.manual_seed(0) movq = self.dummy_movq torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "unet": unet, "scheduler": scheduler, "movq": movq, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "width": 16, "height": 16, } return inputs def test_kandinsky3(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_model_cpu_offload_forward_pass(self): # TODO(Yiyi) - this test should work, skipped for time reasons for now pass @slow @require_torch_gpu class Kandinsky3PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinskyV3(self): pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] assert image.size == (1024, 1024) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) def test_kandinskyV3_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) w, h = 512, 512 image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) prompt = "A painting of the inside of a subway train with tiny raccoons." image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0] assert image.size == (512, 512) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2))
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/repaint/test_repaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = RePaintPipeline params = IMAGE_INPAINTING_PARAMS - {"width", "height", "guidance_scale"} required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } batch_params = IMAGE_INPAINTING_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) torch.manual_seed(0) unet = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) scheduler = RePaintScheduler() components = {"unet": unet, "scheduler": scheduler} return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = np.random.RandomState(seed).standard_normal((1, 3, 32, 32)) image = torch.from_numpy(image).to(device=device, dtype=torch.float32) mask = (image > 0).to(device=device, dtype=torch.float32) inputs = { "image": image, "mask_image": mask, "generator": generator, "num_inference_steps": 5, "eta": 0.0, "jump_length": 2, "jump_n_sample": 2, "output_type": "numpy", } return inputs def test_repaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = RePaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([1.0000, 0.5426, 0.5497, 0.2200, 1.0000, 1.0000, 0.5623, 1.0000, 0.6274]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_save_load_local(self): return super().test_save_load_local() # RePaint can hardly be made deterministic since the scheduler is currently always # nondeterministic @unittest.skip("non-deterministic pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() @nightly @require_torch_gpu class RepaintPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_celebahq(self): original_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "repaint/celeba_hq_256.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "repaint/celeba_hq_256_result.npy" ) model_id = "google/ddpm-ema-celebahq-256" unet = UNet2DModel.from_pretrained(model_id) scheduler = RePaintScheduler.from_pretrained(model_id) repaint = RePaintPipeline(unet=unet, scheduler=scheduler).to(torch_device) repaint.set_progress_bar_config(disable=None) repaint.enable_attention_slicing() generator = torch.manual_seed(0) output = repaint( original_image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).mean() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
import gc import inspect import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelImg2ImgPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = LatentConsistencyModelImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "negative_prompt", "negative_prompt_embeds"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents", "negative_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 0.3664, 0.5815, 0.6080, 0.4977]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3994, 0.3471, 0.2540, 0.7030, 0.6193, 0.3645, 0.5777, 0.5850, 0.4965]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) init_image = init_image.resize((512, 512)) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", "image": init_image, } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1950, 0.1961, 0.2308, 0.1786, 0.1837, 0.2320, 0.1898, 0.1885, 0.2309]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.3756, 0.3816, 0.3767, 0.3718, 0.3739, 0.3735, 0.3863, 0.3803, 0.3563]) assert np.abs(image_slice - expected_slice).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
import gc import inspect import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = LatentConsistencyModelPipeline params = TEXT_TO_IMAGE_PARAMS - {"negative_prompt", "negative_prompt_embeds"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"} image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1441, 0.5304, 0.5452, 0.1361, 0.4011, 0.4370, 0.5326, 0.3492, 0.3637]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # skip because lcm pipeline apply cfg differently def test_callback_cfg(self): pass # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0]) assert np.abs(image_slice - expected_slice).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFInpaintingSuperResolutionPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"}) required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_superresolution_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device) original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import torch from diffusers import ( IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFPipeline params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @slow @require_torch_gpu class IFPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_all(self): # if pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe_2 = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None ) # pre compute text embeddings and remove T5 to save memory pipe_1.text_encoder.to("cuda") prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda") del pipe_1.tokenizer del pipe_1.text_encoder gc.collect() pipe_1.tokenizer = None pipe_1.text_encoder = None pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) pipe_1.remove_all_hooks() pipe_2.remove_all_hooks() # img2img pipe_1 = IFImg2ImgPipeline(**pipe_1.components) pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components) pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) pipe_1.remove_all_hooks() pipe_2.remove_all_hooks() # inpainting pipe_1 = IFInpaintingPipeline(**pipe_1.components) pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components) pipe_1.enable_model_cpu_offload() pipe_2.enable_model_cpu_offload() pipe_1.unet.set_attn_processor(AttnAddedKVProcessor()) pipe_2.unet.set_attn_processor(AttnAddedKVProcessor()) self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds) def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds): # pipeline 1 _start_torch_memory_measurement() image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe_1( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (64, 64, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(image, expected_image) # pipeline 2 _start_torch_memory_measurement() generator = torch.Generator(device="cpu").manual_seed(0) image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device) mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device) output = pipe_2( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(image, expected_image) def _start_torch_memory_measurement(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if_superresolution.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import torch from diffusers import IFSuperResolutionPipeline from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class IFSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFSuperResolutionPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_superresolution_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFInpaintingPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import torch from diffusers import IFImg2ImgPipeline from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class IFImg2ImgPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/__init__.py
import tempfile import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import DDPMScheduler, UNet2DConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np # WARN: the hf-internal-testing/tiny-random-t5 text encoder has some non-determinism in the `save_load` tests. class IFPipelineTesterMixin: def _get_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _get_superresolution_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) image_noising_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } # this test is modified from the base class because if pipelines set the text encoder # as optional with the intention that the user is allowed to encode the prompt once # and then pass the embeddings directly to the pipeline. The base class test uses # the unmodified arguments from `self.get_dummy_inputs` which will pass the unencoded # prompt to the pipeline when the text encoder is set to None, throwing an error. # So we make the test reflect the intended usage of setting the text encoder to None. def _test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] if "image" in inputs: image = inputs["image"] else: image = None if "mask_image" in inputs: mask_image = inputs["mask_image"] else: mask_image = None if "original_image" in inputs: original_image = inputs["original_image"] else: original_image = None prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) # Modified from `PipelineTesterMixin` to set the attn processor as it's not serialized. # This should be handled in the base test and then this method can be removed. def _test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import torch from diffusers import IFImg2ImgSuperResolutionPipeline from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFImg2ImgSuperResolutionPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"}) required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_superresolution_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPTextConfig, CLIPTextModel, CLIPTokenizer, DPTConfig, DPTFeatureExtractor, DPTForDepthEstimation, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionDepth2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionDepth2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionDepth2ImgPipeline test_save_load_optional_components = False params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=5, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } depth_estimator_config = DPTConfig( image_size=32, patch_size=16, num_channels=3, hidden_size=32, num_hidden_layers=4, backbone_out_indices=(0, 1, 2, 3), num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, is_decoder=False, initializer_range=0.02, is_hybrid=True, backbone_config=backbone_config, backbone_featmap_shape=[1, 384, 24, 24], ) depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval() feature_extractor = DPTFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-DPTForDepthEstimation" ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "depth_estimator": depth_estimator, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device) pipe_fp16.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_cpu_offload_forward_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results") def test_dict_tuple_outputs_equivalent(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() self.assertLess(max_diff, 1e-4) def test_progress_bar(self): super().test_progress_bar() def test_stable_diffusion_depth2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626]) else: expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = 2 * [inputs["image"]] image = pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551]) else: expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_pil(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] if torch_device == "mps": expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) @slow @require_torch_gpu class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_depth2img_pipeline_default(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(expected_slice - image_slice).max() < 6e-1 def test_stable_diffusion_depth2img_pipeline_k_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306]) assert np.abs(expected_slice - image_slice).max() < 8e-4 def test_stable_diffusion_depth2img_pipeline_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436]) assert np.abs(expected_slice - image_slice).max() < 5e-4 def test_stable_diffusion_depth2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9 @nightly @require_torch_gpu class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_depth2img_pndm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_depth2img_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_dpm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() inputs["num_inference_steps"] = 30 image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_gpu, skip_mps, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin torch.backends.cuda.matmul.allow_tf32 = False @skip_mps class StableDiffusionAttendAndExcitePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionAttendAndExcitePipeline test_attention_slicing = False params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"}) image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS # Attend and excite requires being able to run a backward pass at # inference time. There's no deterministic backward operator for pad @classmethod def setUpClass(cls): super().setUpClass() torch.use_deterministic_algorithms(False) @classmethod def tearDownClass(cls): super().tearDownClass() torch.use_deterministic_algorithms(True) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = inputs = { "prompt": "a cat and a frog", "token_indices": [2, 5], "generator": generator, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", "max_iter_to_alter": 2, "thresholds": {0: 0.7}, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 64, 64, 3)) expected_slice = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=4e-4) @require_torch_gpu @nightly class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase): # Attend and excite requires being able to run a backward pass at # inference time. There's no deterministic backward operator for pad @classmethod def setUpClass(cls): super().setUpClass() torch.use_deterministic_algorithms(False) @classmethod def tearDownClass(cls): super().tearDownClass() torch.use_deterministic_algorithms(True) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_attend_and_excite_fp16(self): generator = torch.manual_seed(51) pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.to("cuda") prompt = "a painting of an elephant with glasses" token_indices = [5, 7] image = pipe( prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" ) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 5e-1
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, numpy_cosine_similarity_distance, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionDiffEditPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) inverse_scheduler = DDIMInverseScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device) latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_mask_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_mask(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_mask_inputs(device) mask = pipe.generate_mask(**inputs) mask_slice = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16)) expected_slice = np.array([0] * 9) max_diff = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) self.assertEqual(mask[0, -3, -4], 0) def test_inversion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5160, 0.5115, 0.5060, 0.5456, 0.4704, 0.5060, 0.5019, 0.4405, 0.4726], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_inversion_dpm(self): device = "cpu" components = self.get_dummy_components() scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"} components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args) components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args) pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5305, 0.4673, 0.5314, 0.5308, 0.4886, 0.5279, 0.5142, 0.4724, 0.4892], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @require_torch_gpu @nightly class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((256, 256)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_full(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.scheduler.clip_sample = True pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=5, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=5, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((256, 256)) ) / 255 ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1 @nightly @require_torch_gpu class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((768, 768)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_dpm(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768)) ) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionLatentUpscalePipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) @property def dummy_image(self): batch_size = 1 num_channels = 4 sizes = (16, 16) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image def get_dummy_components(self): torch.manual_seed(0) model = UNet2DConditionModel( act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), ) vae = AutoencoderKL( block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) scheduler = EulerDiscreteScheduler(prediction_type="sample") text_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 256, 256, 3)) expected_slice = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=3e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_karras_schedulers_shape(self): skip_schedulers = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) assert check_same_shape(outputs) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @require_torch_gpu @slow class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_latent_upscaler_fp16(self): generator = torch.manual_seed(33) pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.to("cuda") upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" low_res_latents = pipe(prompt, generator=generator, output_type="latent").images image = upscaler( prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean()) < 5e-2 def test_latent_upscaler_fp16_image(self): generator = torch.manual_seed(33) upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" low_res_img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) image = upscaler( prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max()) < 5e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_cond_unet_upscale(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) return CLIPTextModel(config) def test_stable_diffusion_upscale(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_upscale_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" output = sd_pipe( 2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images assert image.shape[0] == 2 generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images assert image.shape[0] == 2 def test_stable_diffusion_upscale_prompt_embeds(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images generator = torch.Generator(device=device).manual_seed(0) prompt_embeds, negative_prompt_embeds = sd_pipe.encode_prompt(prompt, device, 1, False) if negative_prompt_embeds is not None: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) image_from_prompt_embeds = sd_pipe( prompt_embeds=prompt_embeds, image=[low_res_image], generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_prompt_embeds_slice = image_from_prompt_embeds[0, -3:, -3:, -1] expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_upscale_fp16(self): """Test that stable diffusion upscale works with fp16""" unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # put models in fp16, except vae as it overflows in fp16 unet = unet.half() text_encoder = text_encoder.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) image = sd_pipe( [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) def test_stable_diffusion_upscale_from_save_pretrained(self): pipes = [] device = "cpu" # ensure determinism for the device-dependent torch.Generator low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=self.dummy_cond_unet_upscale, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=self.dummy_vae, text_encoder=self.dummy_text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionUpscalePipeline.from_pretrained(tmpdirname).to(device) pipes.append(sd_pipe) prompt = "A painting of a squirrel eating a burger" image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) image_slices = [] for pipe in pipes: generator = torch.Generator(device=device).manual_seed(0) image = pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 @slow @require_torch_gpu class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_upscale_pipeline(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 1e-3 def test_stable_diffusion_upscale_pipeline_fp16(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained( model_id, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained( model_id, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9 def test_download_ckpt_diff_format_is_same(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) prompt = "a cat sitting on a park bench" model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) pipe.enable_model_cpu_offload() generator = torch.Generator("cpu").manual_seed(0) output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3) image_from_pretrained = output.images[0] single_file_path = ( "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors" ) pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(single_file_path) pipe_from_single_file.enable_model_cpu_offload() generator = torch.Generator("cpu").manual_seed(0) output_from_single_file = pipe_from_single_file( prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3 ) image_from_single_file = output_from_single_file.images[0] assert image_from_pretrained.shape == (512, 512, 3) assert image_from_single_file.shape == (512, 512, 3) assert ( numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3 )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def test_stable_diffusion_inpaint_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, torch_dtype=torch.float16, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2PipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_unflawed(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDIMScheduler.from_config( components["scheduler"].config, timestep_spacing="trailing" ) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guidance_rescale"] = 0.7 inputs["num_inference_steps"] = 10 image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 25 * "@" with CaptureLogger(logger) as cap_logger_3: text_embeddings_3, negeative_text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negeative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negeative_text_embeddings_3, text_embeddings_3]) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: text_embeddings, negative_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_embeddings is not None: text_embeddings = torch.cat([negative_embeddings, text_embeddings]) negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger.out == cap_logger_2.out # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 assert cap_logger_3.out == "" def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusion2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_default_ddim(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_pndm(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_k_lms(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.3 GB is allocated assert mem_bytes < 3.3 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.3 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten()) assert max_diff < 5e-3 def test_stable_diffusion_text2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9 def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) outputs_offloaded = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() images = outputs.images images_offloaded = outputs_offloaded.images max_diff = numpy_cosine_similarity_distance(images.flatten(), images_offloaded.flatten()) assert max_diff < 1e-3 assert mem_bytes_offloaded < mem_bytes assert mem_bytes_offloaded < 3 * 10**9 for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") # With attention slicing torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_attention_slicing() _ = pipe(**inputs) mem_bytes_slicing = torch.cuda.max_memory_allocated() assert mem_bytes_slicing < mem_bytes_offloaded @nightly @require_torch_gpu class StableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_2_0_default_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_2_1_default_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_dpm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import time import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=64, ) return CLIPTextModel(config) def test_stable_diffusion_v_pred_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, prediction_type="v_prediction", ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6569, 0.6525, 0.5142, 0.4968, 0.4923, 0.4601, 0.4996, 0.5041, 0.4544]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", prediction_type="v_prediction" ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5644, 0.6514, 0.5190, 0.5663, 0.5287, 0.4953, 0.5430, 0.5243, 0.4778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_v_pred_fp16(self): """Test that stable diffusion v-prediction works with fp16""" unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, prediction_type="v_prediction", ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # put models in fp16 unet = unet.half() vae = vae.half() bert = bert.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images assert image.shape == (1, 64, 64, 3) @slow @require_torch_gpu class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_v_pred_default(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.1868, 0.1922, 0.1527, 0.1921, 0.1908, 0.1624, 0.1779, 0.1652, 0.1734]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_upcast_attention(self): sd_pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.4209, 0.4087, 0.4097, 0.4209, 0.3860, 0.4329, 0.4280, 0.4324, 0.4187]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_v_pred_euler(self): scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler") sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, num_inference_steps=5, output_type="numpy") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.1781, 0.1695, 0.1661, 0.1705, 0.1588, 0.1699, 0.2005, 0.1589, 0.1677]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_dpm(self): """ TODO: update this test after making DPM compatible with V-prediction! """ scheduler = DPMSolverMultistepScheduler.from_pretrained( "stabilityai/stable-diffusion-2", subfolder="scheduler" ) sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "a photograph of an astronaut riding a horse" generator = torch.manual_seed(0) image = sd_pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=5, output_type="numpy" ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.3303, 0.3184, 0.3291, 0.3300, 0.3256, 0.3113, 0.2965, 0.3134, 0.3192]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_attention_slicing_v_pred(self): torch.cuda.reset_peak_memory_stats() model_id = "stabilityai/stable-diffusion-2" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photograph of an astronaut riding a horse" # make attention efficient pipe.enable_attention_slicing() generator = torch.manual_seed(0) output_chunked = pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy" ) image_chunked = output_chunked.images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 5.5 GB is allocated assert mem_bytes < 5.5 * 10**9 # disable slicing pipe.disable_attention_slicing() generator = torch.manual_seed(0) output = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy") image = output.images # make sure that more than 3.0 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_chunked.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_pipeline_v_pred_default(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/astronaut_riding_a_horse_v_pred.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") pipe.to(torch_device) pipe.enable_attention_slicing() pipe.set_progress_bar_config(disable=None) prompt = "astronaut riding a horse" generator = torch.manual_seed(0) output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_pipeline_unflawed(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/lion_galaxy.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") pipe.scheduler = DDIMScheduler.from_config( pipe.scheduler.config, timestep_spacing="trailing", rescale_betas_zero_snr=True ) pipe.to(torch_device) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" generator = torch.Generator("cpu").manual_seed(0) output = pipe( prompt=prompt, guidance_scale=7.5, num_inference_steps=10, guidance_rescale=0.7, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 5e-2 def test_stable_diffusion_text2img_pipeline_v_pred_fp16(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/astronaut_riding_a_horse_v_pred_fp16.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "astronaut riding a horse" generator = torch.manual_seed(0) output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-3 def test_download_local(self): filename = hf_hub_download("stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.safetensors") pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (768, 768, 3) def test_download_ckpt_diff_format_is_same(self): single_file_path = ( "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" ) pipe_single = StableDiffusionPipeline.from_single_file(single_file_path) pipe_single.scheduler = DDIMScheduler.from_config(pipe_single.scheduler.config) pipe_single.unet.set_attn_processor(AttnProcessor()) pipe_single.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe_single("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_intermediate_state_v_pred(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 96, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.7749, 0.0325, 0.5088, 0.1619, 0.3372, 0.3667, -0.5186, 0.6860, 1.4326]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 19: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 96, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([1.3887, 1.0273, 1.7266, 0.0726, 0.6611, 0.1598, -1.0547, 0.1522, 0.0227]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 test_callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Andromeda galaxy in a bottle" generator = torch.manual_seed(0) pipe( prompt=prompt, num_inference_steps=20, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, ) assert test_callback_fn.has_been_called assert number_of_steps == 20 def test_stable_diffusion_low_cpu_mem_usage_v_pred(self): pipeline_id = "stabilityai/stable-diffusion-2" start_time = time.time() pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline_low_cpu_mem_usage.to(torch_device) low_cpu_mem_usage_time = time.time() - start_time start_time = time.time() _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) normal_load_time = time.time() - start_time assert 2 * low_cpu_mem_usage_time < normal_load_time def test_stable_diffusion_pipeline_with_sequential_cpu_offloading_v_pred(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipeline_id = "stabilityai/stable-diffusion-2" prompt = "Andromeda galaxy in a bottle" pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.enable_attention_slicing(1) pipeline.enable_sequential_cpu_offload() generator = torch.manual_seed(0) _ = pipeline(prompt, generator=generator, num_inference_steps=5) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import nightly, require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @nightly @require_flax class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_flax(self): sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, ) prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 @nightly @require_flax class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_dpm_flax(self): model_id = "stabilityai/stable-diffusion-2" scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, ) params["scheduler"] = scheduler_params prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "xvjiarui/stable-diffusion-2-inpainting" pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] init_image = num_samples * [init_image] mask_image = num_samples * [mask_image] prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) processed_masked_images = shard(processed_masked_images) processed_masks = shard(processed_masks) output = pipeline( prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True ) images = output.images.reshape(num_samples, 512, 512, 3) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = LDMTextToImagePipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_inference_text2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LDMTextToImagePipeline(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878]) max_diff = np.abs(expected_slice - image_slice).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device enable_full_determinism() class LDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) def test_inference_uncond(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vae = self.dummy_vq_model ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler) ldm.to(torch_device) ldm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images generator = torch.manual_seed(0) image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172]) tolerance = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance @nightly @require_torch class LDMPipelineIntegrationTests(unittest.TestCase): def test_inference_uncond(self): ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256") ldm.to(torch_device) ldm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447]) tolerance = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel from diffusers.utils import PIL_INTERPOLATION from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch, torch_device, ) enable_full_determinism() class LDMSuperResolutionPipelineFastTests(unittest.TestCase): @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=6, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model def test_inference_superresolution(self): device = "cpu" unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(device) generator = torch.Generator(device=device).manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() vqvae = self.dummy_vq_model # put models in fp16 unet = unet.half() vqvae = vqvae.half() ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) ldm.to(torch_device) ldm.set_progress_bar_config(disable=None) init_image = self.dummy_image.to(torch_device) image = ldm(init_image, num_inference_steps=2, output_type="numpy").images assert image.shape == (1, 64, 64, 3) @nightly @require_torch class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase): def test_inference_superresolution(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool.png" ) init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"]) ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution", device_map="auto") ldm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPix2PixPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionInstructPix2PixPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInstructPix2PixPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"image_latents"}) - {"negative_prompt_embeds"} def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 image = np.array(inputs["image"]).astype(np.float32) / 255.0 image = torch.from_numpy(image).unsqueeze(0).to(device) image = image / 2 + 0.5 image = image.permute(0, 3, 1, 2) inputs["image"] = image.repeat(2, 1, 1, 1) image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] slice = [round(x, 4) for x in image_slice.flatten().tolist()] print(",".join([str(x) for x in slice])) assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # Overwrite the default test_latents_inputs because pix2pix encode the image differently def test_latents_input(self): components = self.get_dummy_components() pipe = StableDiffusionInstructPix2PixPipeline(**components) pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] vae = components["vae"] inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") for image_param in self.image_latents_params: if image_param in inputs.keys(): inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode() out_latents_inputs = pipe(**inputs)[0] max_diff = np.abs(out - out_latents_inputs).max() self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") # Override the default test_callback_cfg because pix2pix create inputs for cfg differently def test_callback_cfg(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) def callback_no_cfg(pipe, i, t, callback_kwargs): if i == 1: for k, w in callback_kwargs.items(): if k in self.callback_cfg_params: callback_kwargs[k] = callback_kwargs[k].chunk(3)[0] pipe._guidance_scale = 1.0 return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["guidance_scale"] = 1.0 inputs["num_inference_steps"] = 2 out_no_cfg = pipe(**inputs)[0] inputs["guidance_scale"] = 7.5 inputs["callback_on_step_end"] = callback_no_cfg inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs out_callback_no_cfg = pipe(**inputs)[0] assert out_no_cfg.shape == out_callback_no_cfg.shape @slow @require_torch_gpu class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) inputs = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_default(self): pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_k_lms(self): pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_ddim(self): pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self): inputs = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 inputs["image"] = inputs["image"].resize((504, 504)) model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( model_id, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() output = pipe(**inputs) image = output.images[0] image_slice = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_gligen_text_image.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import ( CLIPProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENTextImagePipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import CLIPImageProjection from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenTextImagePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENTextImagePipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_images", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated-text-image", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") image_project = CLIPImageProjection(hidden_size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": image_encoder, "image_project": image_project, "processor": processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) gligen_images = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" ) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_images": [gligen_images], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_text_image_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, AutoencoderTiny, DDIMScheduler, DPMSolverMultistepScheduler, HeunDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_img2img_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0606, 0.0570, 0.0805, 0.0579, 0.0628, 0.0623, 0.0843, 0.1115, 0.0806]) assert np.abs(expected_slice - image_slice).max() < 1e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_img2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_default_case_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_default_case_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.00669, 0.00669, 0.0, 0.00693, 0.00858, 0.0, 0.00567, 0.00515, 0.00125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @slow @require_torch_gpu class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_img2img_default(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_k_lms(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_ddim(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16, ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) _ = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() assert mem_bytes_offloaded < mem_bytes for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") def test_img2img_2nd_order(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 10 inputs["strength"] = 0.75 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 5e-2 inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 11 inputs["strength"] = 0.75 image_other = sd_pipe(**inputs).images[0] mean_diff = np.abs(image - image_other).mean() # images should be very similar assert mean_diff < 5e-2 def test_stable_diffusion_img2img_pipeline_multiple_of_8(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 init_image = init_image.resize((760, 504)) model_id = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionImg2ImgPipeline.from_pretrained( model_id, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "A fantasy landscape, trending on artstation" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", ) image = output.images[0] image_slice = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_img2img_safety_checker_works(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 20 # make sure the safety checker is activated inputs["prompt"] = "naked, sex, porn" out = sd_pipe(**inputs) assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}" assert np.abs(out.images[0]).sum() < 1e-5 # should be all zeros @require_python39_or_higher @require_torch_2 def test_img2img_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_img2img_compile, inputs=inputs) @nightly @require_torch_gpu class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 50, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_img2img_pndm(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_ddim(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_lms(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_dpm(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_lms(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] text_inputs = pipe.tokenizer( prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0] inputs["prompt_embeds"] = prompt_embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = pipe.tokenizer( p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): # using the PNDM scheduler by default sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" np.random.seed(0) output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_ddim(self): ddim_scheduler = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_intermediate_state(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 test_callback_fn.has_been_called = False pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "Andromeda galaxy in a bottle" generator = np.random.RandomState(0) pipe( prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def test_stable_diffusion_no_safety_checker(self): pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) assert isinstance(pipe, OnnxStableDiffusionPipeline) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionSAGPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class StableDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_1(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_2(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_2_non_square(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", ) image = output.images assert image.shape == (1, 512, 768, 3)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_gligen.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENPipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint_legacy.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A red cat sitting on a park bench" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, PNDMScheduler, StableDiffusionModelEditingPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionModelEditingPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionModelEditingPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A field of roses", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. "height": None, "width": None, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_model_editing_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4755, 0.5132, 0.4976, 0.3904, 0.3554, 0.4765, 0.5139, 0.5158, 0.4889]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4992, 0.5101, 0.5004, 0.3949, 0.3604, 0.4735, 0.5216, 0.5204, 0.4913]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4747, 0.5372, 0.4779, 0.4982, 0.5543, 0.4816, 0.5238, 0.4904, 0.5027]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # the pipeline does not expect pndm so test if it raises error. with self.assertRaises(ValueError): _ = sd_pipe(**inputs).images def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) @nightly @require_torch_gpu class StableDiffusionModelEditingSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A field of roses", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_model_editing_default(self): model_ckpt = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6749496, 0.6386453, 0.51443267, 0.66094905, 0.61921215, 0.5491332, 0.5744417, 0.58075106, 0.5174658] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 # make sure image changes after editing pipe.edit_model("A pack of roses", "A pack of blue roses") image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) assert np.abs(expected_slice - image_slice).max() > 1e-1 def test_stable_diffusion_model_editing_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() model_ckpt = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionModelEditingPipeline.from_pretrained( model_ckpt, scheduler=scheduler, safety_checker=None ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 4.4 GB is allocated assert mem_bytes < 4.4 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device enable_full_determinism() @nightly @require_torch_gpu class StableDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_1(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_2(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_euler") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1 def test_stable_diffusion_karras_sigmas(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_2m") prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True, ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_noise_sampler_seed(self): sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) sd_pipe.set_scheduler("sample_dpmpp_sde") prompt = "A painting of a squirrel eating a burger" seed = 0 images1 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images images2 = sd_pipe( [prompt], generator=torch.manual_seed(seed), noise_sampler_seed=seed, guidance_scale=9.0, num_inference_steps=20, output_type="np", ).images assert images1.shape == (1, 512, 512, 3) assert images2.shape == (1, 512, 512, 3) assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMParallelScheduler, DDPMParallelScheduler, StableDiffusionParadigmsPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionParadigmsPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionParadigmsPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMParallelScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a photograph of an astronaut riding a horse", "generator": generator, "num_inference_steps": 10, "guidance_scale": 6.0, "output_type": "numpy", "parallel": 3, "debug": True, } return inputs def test_stable_diffusion_paradigms_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4773, 0.5417, 0.4723, 0.4925, 0.5631, 0.4752, 0.5240, 0.4935, 0.5023]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_paradigms_default_case_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() torch.manual_seed(0) components["scheduler"] = DDPMParallelScheduler() torch.manual_seed(0) sd_pipe = StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3573, 0.4420, 0.4960, 0.4799, 0.3796, 0.3879, 0.4819, 0.4365, 0.4468]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # override to speed the overall test timing up. def test_inference_batch_consistent(self): super().test_inference_batch_consistent(batch_sizes=[1, 2]) # override to speed the overall test timing up. def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3e-3) def test_stable_diffusion_paradigms_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4771, 0.5420, 0.4683, 0.4918, 0.5636, 0.4725, 0.5230, 0.4923, 0.5015]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.Generator(device=torch_device).manual_seed(seed) inputs = { "prompt": "a photograph of an astronaut riding a horse", "generator": generator, "num_inference_steps": 10, "guidance_scale": 7.5, "output_type": "numpy", "parallel": 3, "debug": True, } return inputs def test_stable_diffusion_paradigms_default(self): model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMParallelScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionParadigmsPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9622, 0.9602, 0.9748, 0.9591, 0.9630, 0.9691, 0.9661, 0.9631, 0.9741]) assert np.abs(expected_slice - image_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from parameterized import parameterized from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, LCMScheduler, MultiAdapter, PNDMScheduler, StableDiffusionAdapterPipeline, T2IAdapter, UNet2DConditionModel, ) from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class AdapterTests: pipeline_class = StableDiffusionAdapterPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS def get_dummy_components(self, adapter_type, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, time_cond_proj_dim=time_cond_proj_dim, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) if adapter_type == "full_adapter" or adapter_type == "light_adapter": adapter = T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type="full_adapter", ), T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type="full_adapter", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_components_with_full_downscaling(self, adapter_type): """Get dummy components with x8 VAE downscaling and 4 UNet down blocks. These dummy components are intended to fully-exercise the T2I-Adapter downscaling behavior. """ torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 32, 32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) if adapter_type == "full_adapter" or adapter_type == "light_adapter": adapter = T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type="full_adapter", ), T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type="full_adapter", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): if num_images == 1: image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) else: image = [ floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images) ] if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) @parameterized.expand( [ # (dim=264) The internal feature map will be 33x33 after initial pixel unshuffling (downscaled x8). (((4 * 8 + 1) * 8),), # (dim=272) The internal feature map will be 17x17 after the first T2I down block (downscaled x16). (((4 * 4 + 1) * 16),), # (dim=288) The internal feature map will be 9x9 after the second T2I down block (downscaled x32). (((4 * 2 + 1) * 32),), # (dim=320) The internal feature map will be 5x5 after the third T2I down block (downscaled x64). (((4 * 1 + 1) * 64),), ] ) def test_multiple_image_dimensions(self, dim): """Test that the T2I-Adapter pipeline supports any input dimension that is divisible by the adapter's `downscale_factor`. This test was added in response to an issue where the T2I Adapter's downscaling padding behavior did not match the UNet's behavior. Note that we have selected `dim` values to produce odd resolutions at each downscaling level. """ components = self.get_dummy_components_with_full_downscaling() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) image = sd_pipe(**inputs).images assert image.shape == (1, dim, dim, 3) def test_adapter_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionFullAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("full_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("full_adapter") def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("light_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("light_adapter") def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("multi_adapter") def get_dummy_inputs(self, device, height=64, width=64, seed=0): inputs = super().get_dummy_inputs(device, seed, height=height, width=width, num_images=2) inputs["adapter_conditioning_scale"] = [0.5, 0.5] return inputs def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4902, 0.5539, 0.4317, 0.4682, 0.6190, 0.4351, 0.5018, 0.5046, 0.4772]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_inference_batch_consistent( self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs for batch_size in batch_sizes: batched_inputs = {} for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] batched_inputs["output_type"] = "np" if self.pipeline_class.__name__ == "DanceDiffusionPipeline": batched_inputs.pop("output_type") output = pipe(**batched_inputs) assert len(output[0]) == batch_size batched_inputs["output_type"] = "np" if self.pipeline_class.__name__ == "DanceDiffusionPipeline": batched_inputs.pop("output_type") output = pipe(**batched_inputs)[0] assert output.shape[0] == batch_size logger.setLevel(level=diffusers.logging.WARNING) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: if key == "image": batched_images = [] for image in inputs[key]: batched_images.append(batch_size * [image]) inputs[key] = batched_images else: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_inference_batch_single_identical( self, batch_size=3, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False, expected_max_diff=2e-3, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): if test_max_difference is None: # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems # make sure that batched and non-batched is identical test_max_difference = torch_device != "mps" if test_mean_pixel_difference is None: # TODO same as above test_mean_pixel_difference = torch_device != "mps" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batch_size = batch_size for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size elif name == "generator": batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)] else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] if self.pipeline_class.__name__ != "DanceDiffusionPipeline": batched_inputs["output_type"] = "np" output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size inputs["generator"] = self.get_generator(0) output = pipe(**inputs) logger.setLevel(level=diffusers.logging.WARNING) if test_max_difference: if relax_max_difference: # Taking the median of the largest <n> differences # is resilient to outliers diff = np.abs(output_batch[0][0] - output[0][0]) diff = diff.flatten() diff.sort() max_diff = np.median(diff[-5:]) else: max_diff = np.abs(output_batch[0][0] - output[0][0]).max() assert max_diff < expected_max_diff if test_mean_pixel_difference: assert_mean_pixel_difference(output_batch[0][0], output[0][0]) @slow @require_torch_gpu class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_adapter_color(self): adapter_model = "TencentARC/t2iadapter_color_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "snail" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth(self): adapter_model = "TencentARC/t2iadapter_depth_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "snail" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth_sd_v14(self): adapter_model = "TencentARC/t2iadapter_depth_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "desk" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth_sd_v15(self): adapter_model = "TencentARC/t2iadapter_depth_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "desk" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_keypose_sd_v14(self): adapter_model = "TencentARC/t2iadapter_keypose_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "person" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/person_keypose.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_keypose_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_openpose_sd_v14(self): adapter_model = "TencentARC/t2iadapter_openpose_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "person" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/iron_man_pose.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_openpose_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_seg_sd_v14(self): adapter_model = "TencentARC/t2iadapter_seg_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "motorcycle" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_seg_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_zoedepth_sd_v15(self): adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "motorcycle" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_zoedepth_sd15v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_canny_sd_v14(self): adapter_model = "TencentARC/t2iadapter_canny_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "toy" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_canny_sd_v15(self): adapter_model = "TencentARC/t2iadapter_canny_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "toy" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_sketch_sd14(self): adapter_model = "TencentARC/t2iadapter_sketch_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "cat" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png" ) input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_sketch_sd15(self): adapter_model = "TencentARC/t2iadapter_sketch_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "cat" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png" ) input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_seg_sd14v1") pipe = StableDiffusionAdapterPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png" ) pipe(prompt="foo", image=image, num_inference_steps=2) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A red cat sitting on a park bench" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" ) pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A red cat sitting on a park bench" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, PNDMScheduler, StableDiffusionImageVariationPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionImageVariationPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionImageVariationPipeline params = IMAGE_VARIATION_PARAMS batch_params = IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, image_size=32, patch_size=4, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "safety_checker": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_img_variation_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5239, 0.5723, 0.4796, 0.5049, 0.5550, 0.4685, 0.5329, 0.4891, 0.4921]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img_variation_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["image"] = 2 * [inputs["image"]] output = sd_pipe(**inputs) image = output.images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) expected_slice = np.array([0.6892, 0.5637, 0.5836, 0.5771, 0.6254, 0.6409, 0.5580, 0.5569, 0.5289]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/input_image_vermeer.png" ) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "image": init_image, "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_img_variation_pipeline_default(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_inputs(generator_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8449, 0.9079, 0.7571, 0.7873, 0.8348, 0.7010, 0.6694, 0.6873, 0.6138]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 1e-4 def test_stable_diffusion_img_variation_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.7974, -0.4343, -1.087, 0.04785, -1.327, 0.855, -2.148, -0.1725, 1.439]) max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) assert max_diff < 1e-3 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.3232, 0.004883, 0.913, -1.084, 0.6143, -1.6875, -2.463, -0.439, -0.419]) max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) assert max_diff < 1e-3 callback_fn.has_been_called = False pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() generator_device = "cpu" inputs = self.get_inputs(generator_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.6 GB is allocated assert mem_bytes < 2.6 * 10**9 @nightly @require_torch_gpu class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/input_image_vermeer.png" ) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "image": init_image, "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_img_variation_pndm(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/lambdalabs_variations_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img_variation_dpm(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/lambdalabs_variations_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPanoramaPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. "height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_panorama_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_circular_padding_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs, circular_padding=True).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # override to speed the overall test timing up. def test_inference_batch_consistent(self): super().test_inference_batch_consistent(batch_sizes=[1, 2]) # override to speed the overall test timing up. def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=5.0e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_stable_diffusion_panorama_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch_circular_padding(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, circular_padding=True, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionPanoramaNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_panorama_default(self): model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 def test_stable_diffusion_panorama_k_lms(self): pipe = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 def test_stable_diffusion_panorama_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_ldm3d.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDM3DPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase): pipeline_class = StableDiffusionLDM3DPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth image_slice_rgb = rgb[0, -3:, -3:, -1] image_slice_depth = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) expected_slice_depth = np.array([103.46727, 85.812004, 87.849236]) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = ldm3d_pipe(**inputs) rgb_slice_1, depth_slice_1 = output.rgb, output.depth rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1] depth_slice_1 = depth_slice_1[0, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = ldm3d_pipe.tokenizer( prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = ldm3d_pipe(**inputs) rgb_slice_2, depth_slice_2 = output.rgb, output.depth rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1] depth_slice_2 = depth_slice_2[0, -3:, -1] assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4 assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, -1] depth_slice = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) expected_slice_depth = np.array([107.84738, 84.62802, 89.962135]) assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d_stable_diffusion(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d") ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, -1].flatten() depth_slice = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) expected_slice_rgb = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) expected_slice_depth = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3 @nightly @require_torch_gpu class StableDiffusionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.495586 expected_rgb_std = 0.33795515 expected_depth_mean = 112.48518 expected_depth_std = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3 def test_ldm3d_v2(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.4194127 expected_rgb_std = 0.35375586 expected_depth_mean = 0.5638502 expected_depth_std = 0.34686103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_lms(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) # warmup pass to apply optimizations _ = pipe(**self.get_dummy_inputs()) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((768, 512)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((768, 512)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_cycle_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = CycleDiffusionPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"}) image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def test_stable_diffusion_cycle(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = CycleDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_cycle_fp16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe = CycleDiffusionPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @unittest.skip("non-deterministic pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() @nightly @require_torch_gpu class CycleDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_cycle_diffusion_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) init_image = init_image.resize((512, 512)) model_id = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = CycleDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16" ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() source_prompt = "A black colored car" prompt = "A blue colored car" generator = torch.manual_seed(0) output = pipe( prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", ) image = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image).max() < 5e-1 def test_cycle_diffusion_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) init_image = init_image.resize((512, 512)) model_id = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() source_prompt = "A black colored car" prompt = "A blue colored car" generator = torch.manual_seed(0) output = pipe( prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", ) image = output.images assert np.abs(image - expected_image).max() < 2e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, StableDiffusionPix2PixZeroPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, load_pt, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() @skip_mps class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPix2PixZeroPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt" ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() inverse_scheduler = DDIMInverseScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "inverse_scheduler": inverse_scheduler, "caption_generator": None, "caption_processor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device) dummy_image = dummy_image / 2 + 0.5 generator = torch.manual_seed(seed) inputs = { "prompt": [ "A painting of a squirrel eating a burger", "A painting of a burger eating a squirrel", ], "image": dummy_image.cpu(), "num_inference_steps": 2, "guidance_scale": 6.0, "generator": generator, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inversion_inputs(device, seed) if input_image_type == "pt": image = inputs["image"] elif input_image_type == "np": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) elif input_image_type == "pil": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) image = VaeImageProcessor.numpy_to_pil(image) else: raise ValueError(f"unsupported input_image_type {input_image_type}") inputs["image"] = image inputs["output_type"] = output_type return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_stable_diffusion_pix2pix_zero_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][:1] inputs["prompt"] = inputs["prompt"][:1] image = sd_pipe.invert(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4732, 0.4630, 0.5722, 0.5103, 0.5140, 0.5622, 0.5104, 0.5390, 0.5020]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`") assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1) # Non-determinism caused by the scheduler optimizing the latent inputs during inference @unittest.skip("non-deterministic pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @nightly @require_torch_gpu class StableDiffusionPix2PixZeroPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog.pt" ) def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "turn him into a cyborg", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_zero_default(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5742, 0.5757, 0.5747, 0.5781, 0.5688, 0.5713, 0.5742, 0.5664, 0.5747]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_pix2pix_zero_k_lms(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6367, 0.5459, 0.5146, 0.5479, 0.4905, 0.4753, 0.4961, 0.4629, 0.4624]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_pix2pix_zero_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.1345, 0.268, 0.1539, 0.0726, 0.0959, 0.2261, -0.2673, 0.0277, -0.2062]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.1393, 0.2637, 0.1617, 0.0724, 0.0987, 0.2271, -0.2666, 0.0299, -0.2104]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 8.2 GB is allocated assert mem_bytes < 8.2 * 10**9 @nightly @require_torch_gpu class InversionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" ) raw_image = raw_image.convert("RGB").resize((512, 512)) cls.raw_image = raw_image def test_stable_diffusion_pix2pix_inversion(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10) inv_latents = output[0] image_slice = inv_latents[0, -3:, -3:, -1].flatten() assert inv_latents.shape == (1, 4, 64, 64) expected_slice = np.array([0.8447, -0.0730, 0.7588, -1.2070, -0.4678, 0.1511, -0.8555, 1.1816, -0.7666]) assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2 def test_stable_diffusion_2_pix2pix_inversion(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10) inv_latents = output[0] image_slice = inv_latents[0, -3:, -3:, -1].flatten() assert inv_latents.shape == (1, 4, 64, 64) expected_slice = np.array([0.8970, -0.1611, 0.4766, -1.1162, -0.5923, 0.1050, -0.9678, 1.0537, -0.6050]) assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2 def test_stable_diffusion_2_pix2pix_full(self): # numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog_2.png expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog_2.npy" ) pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator) inv_latents = output[0] source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"] target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"] source_embeds = pipe.get_embeds(source_prompts) target_embeds = pipe.get_embeds(target_prompts) image = pipe( caption, source_embeds=source_embeds, target_embeds=target_embeds, num_inference_steps=125, cross_attention_guidance_amount=0.015, generator=generator, latents=inv_latents, negative_prompt=caption, output_type="np", ).images mean_diff = np.abs(expected_image - image).mean() assert mean_diff < 0.25
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_inpaint_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0689, 0.0699, 0.0790, 0.0536, 0.0470, 0.0488, 0.041, 0.0508, 0.04179]) assert np.abs(expected_slice - image_slice).max() < 3e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), time_cond_proj_dim=time_cond_proj_dim, layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched if output_pil: # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) else: # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) # Convert image to [-1, 1] init_image = 2.0 * image - 1.0 mask_image = torch.ones((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4703, 0.5697, 0.3879, 0.5470, 0.6042, 0.4413, 0.5078, 0.4728, 0.4469]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) out_pil = output.images inputs = self.get_dummy_inputs(device) inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) output = sd_pipe(**inputs) out_tensor = output.images assert out_pil.shape == (1, 64, 64, 3) assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_stable_diffusion_inpaint_strength_zero_test(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # check that the pipeline raises value error when num_inference_steps is < 1 inputs["strength"] = 0.01 with self.assertRaises(ValueError): sd_pipe(**inputs).images def test_stable_diffusion_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = ( sd_pipe.vae.encode(image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = ( sd_pipe.vae.encode(masked_image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6584, 0.5424, 0.5649, 0.5449, 0.5897, 0.6111, 0.5404, 0.5463, 0.5214]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 @slow @require_torch_gpu class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794]) assert np.abs(expected_slice - image_slice).max() < 6e-4 def test_stable_diffusion_inpaint_fp16(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1509, 0.1245, 0.1672, 0.1655, 0.1519, 0.1226, 0.1462, 0.1567, 0.2451]) assert np.abs(expected_slice - image_slice).max() < 1e-1 def test_stable_diffusion_inpaint_pndm(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_inpaint_compile, inputs=inputs) def test_stable_diffusion_inpaint_pil_input_resolution_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2728, 0.2803, 0.2665, 0.2511, 0.2774, 0.2586, 0.2391, 0.2392, 0.2582]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_simple_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3757, 0.3875, 0.4445, 0.4353, 0.3780, 0.4513, 0.3965, 0.3984, 0.4362]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt" pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image_ckpt = pipe(**inputs).images[0] pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image = pipe(**inputs).images[0] assert np.max(np.abs(image - image_ckpt)) < 5e-4 @slow @require_torch_gpu class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0522, 0.0604, 0.0596, 0.0449, 0.0493, 0.0427, 0.1186, 0.1289, 0.1442]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_inpaint_fp16(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1343, 0.1406, 0.1440, 0.1504, 0.1729, 0.0989, 0.1807, 0.2822, 0.1179]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_inpaint_pndm(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0966, 0.1083, 0.1148, 0.1422, 0.1318, 0.1197, 0.3702, 0.3537, 0.3288]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8931, 0.8683, 0.8965, 0.8501, 0.8592, 0.9118, 0.8734, 0.7463, 0.8990]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe.vae = vae pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.45 GB is allocated assert mem_bytes < 2.45 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): pass def test_stable_diffusion_inpaint_pil_input_resolution_test(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_simple_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3296, 0.4041, 0.4097, 0.4145, 0.4342, 0.4152, 0.4927, 0.4931, 0.4430]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.vae = vae pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): pass @nightly @require_torch_gpu class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase): def test_pil_inputs(self): height, width = 32, 32 im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im = Image.fromarray(im) mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5 mask = Image.fromarray((mask * 255).astype(np.uint8)) t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True) self.assertTrue(isinstance(t_mask, torch.Tensor)) self.assertTrue(isinstance(t_masked, torch.Tensor)) self.assertTrue(isinstance(t_image, torch.Tensor)) self.assertEqual(t_mask.ndim, 4) self.assertEqual(t_masked.ndim, 4) self.assertEqual(t_image.ndim, 4) self.assertEqual(t_mask.shape, (1, 1, height, width)) self.assertEqual(t_masked.shape, (1, 3, height, width)) self.assertEqual(t_image.shape, (1, 3, height, width)) self.assertTrue(t_mask.dtype == torch.float32) self.assertTrue(t_masked.dtype == torch.float32) self.assertTrue(t_image.dtype == torch.float32) self.assertTrue(t_mask.min() >= 0.0) self.assertTrue(t_mask.max() <= 1.0) self.assertTrue(t_masked.min() >= -1.0) self.assertTrue(t_masked.min() <= 1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_mask.sum() > 0.0) def test_np_inputs(self): height, width = 32, 32 im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im_pil = Image.fromarray(im_np) mask_np = ( np.random.randint( 0, 255, ( height, width, ), dtype=np.uint8, ) > 127.5 ) mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8)) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image( im_pil, mask_pil, height, width, return_image=True ) self.assertTrue((t_mask_np == t_mask_pil).all()) self.assertTrue((t_masked_np == t_masked_pil).all()) self.assertTrue((t_image_np == t_image_pil).all()) def test_torch_3D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_3D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_4D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0][0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_3D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy() for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_4D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy()[0] for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_shape_mismatch(self): height, width = 32, 32 # test height and width with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 3, height, width, ), torch.randn(64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 1, 64, 64), height, width, return_image=True, ) def test_type_mismatch(self): height, width = 32, 32 # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.rand( 3, height, width, ).numpy(), height, width, return_image=True, ) # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ).numpy(), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_channels_first(self): height, width = 32, 32 # test channels first for 3D tensors with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.rand(height, width, 3), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_tensor_range(self): height, width = 32, 32 # test im <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * 2, torch.rand( height, width, ), height, width, return_image=True, ) # test im >= -1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * (-2), torch.rand( height, width, ), height, width, return_image=True, ) # test mask <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * 2, height, width, return_image=True, ) # test mask >= 0 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * -1, height, width, return_image=True, )
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # TODO: is there an appropriate internal test set? hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddpm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6957, 0.7002, 0.7186, 0.6881, 0.6693, 0.6910, 0.7445, 0.7274, 0.7056]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.7349, 0.7347, 0.7034, 0.7696, 0.7876, 0.7597, 0.7916, 0.8085, 0.8036]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_ddpm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import time import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.unet.to(memory_format=torch.channels_last) sd_pipe.unet = torch.compile(sd_pipe.unet, mode="reduce-overhead", fullgraph=True) sd_pipe.set_progress_bar_config(disable=None) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239]) assert np.abs(image_slice - expected_slice).max() < 5e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, time_cond_proj_dim=time_cond_proj_dim, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3203, 0.4555, 0.4711, 0.3505, 0.3973, 0.4650, 0.5137, 0.3392, 0.4045]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3454, 0.5349, 0.5185, 0.2808, 0.4509, 0.4612, 0.4655, 0.3601, 0.4315]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3454, 0.5349, 0.5185, 0.2808, 0.4509, 0.4612, 0.4655, 0.3601, 0.4315]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = sd_pipe.tokenizer( prompt, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = sd_pipe.tokenizer( p, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) embeds.append(sd_pipe.text_encoder(text_inputs)[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_prompt_embeds_with_plain_negative_prompt_list(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = negative_prompt prompt = 3 * [inputs.pop("prompt")] text_inputs = sd_pipe.tokenizer( prompt, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, height=136, width=136) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 136, 136, 3) expected_slice = np.array([0.4346, 0.5621, 0.5016, 0.3926, 0.4533, 0.4134, 0.5625, 0.5632, 0.5265]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3411, 0.5032, 0.4704, 0.3135, 0.4323, 0.4740, 0.5150, 0.3498, 0.4022]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None ) assert isinstance(pipe, StableDiffusionPipeline) assert isinstance(pipe.scheduler, LMSDiscreteScheduler) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3149, 0.5246, 0.4796, 0.3218, 0.4469, 0.4729, 0.5151, 0.3597, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3151, 0.5243, 0.4794, 0.3217, 0.4468, 0.4728, 0.5152, 0.3598, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3149, 0.5246, 0.4796, 0.3218, 0.4469, 0.4729, 0.5151, 0.3597, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) image_count = 4 inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result sd_pipe.enable_vae_slicing() inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_2 = sd_pipe(**inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_vae_tiling(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # make sure here that pndm scheduler skips prk components["safety_checker"] = None sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" # Test that tiled decode at 512x512 yields the same result as the non-tiled decode generator = torch.Generator(device=device).manual_seed(0) output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") # make sure tiled vae decode yields the same result sd_pipe.enable_vae_tiling() generator = torch.Generator(device=device).manual_seed(0) output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 5e-1 # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] for shape in shapes: zeros = torch.zeros(shape).to(device) sd_pipe.vae.decode(zeros) def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3458, 0.5120, 0.4800, 0.3116, 0.4348, 0.4802, 0.5237, 0.3467, 0.3991]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: negative_text_embeddings, text_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings is not None: text_embeddings = torch.cat([negative_text_embeddings, text_embeddings]) # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: negative_text_embeddings_2, text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert cap_logger.out == cap_logger_2.out prompt = 25 * "@" with CaptureLogger(logger) as cap_logger_3: negative_text_embeddings_3, text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger_3.out == "" def test_stable_diffusion_height_width_opt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (64, 64) output = sd_pipe(prompt, num_inference_steps=1, height=96, width=96, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (96, 96) config = dict(sd_pipe.unet.config) config["sample_size"] = 96 sd_pipe.unet = UNet2DConditionModel.from_config(config).to(torch_device) output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (192, 192) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_freeu_enabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images assert not np.allclose( output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1] ), "Enabling of FreeU should lead to different results." def test_freeu_disabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) sd_pipe.disable_freeu() freeu_keys = {"s1", "s2", "b1", "b2"} for upsample_block in sd_pipe.unet.up_blocks: for key in freeu_keys: assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." output_no_freeu = sd_pipe( prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0) ).images assert np.allclose( output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1] ), "Disabling of FreeU should lead to results similar to the default pipeline results." @slow @require_torch_gpu class StableDiffusionPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_1_1_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4363, 0.4355, 0.3667, 0.4066, 0.3970, 0.3866, 0.4394, 0.4356, 0.4059]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_v1_4_with_freeu(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) image = sd_pipe(**inputs).images image = image[0, -3:, -3:, -1].flatten() expected_image = [0.0721, 0.0588, 0.0268, 0.0384, 0.0636, 0.0, 0.0429, 0.0344, 0.0309] max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_4_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5740, 0.4784, 0.3162, 0.6358, 0.5831, 0.5505, 0.5082, 0.5631, 0.5575]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239]) assert np.abs(image_slice - expected_slice).max() < 1e-4 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.10542, 0.09620, 0.07332, 0.09015, 0.09382, 0.07597, 0.08496, 0.07806, 0.06455]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_dpm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.03503, 0.03494, 0.01087, 0.03128, 0.02552, 0.00803, 0.00742, 0.00372, 0.00000]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.75 GB is allocated assert mem_bytes < 3.75 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.75 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.75 * 10**9 max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_vae_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # enable vae slicing pipe.enable_vae_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) inputs["prompt"] = [inputs["prompt"]] * 4 inputs["latents"] = torch.cat([inputs["latents"]] * 4) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 4 GB is allocated assert mem_bytes < 4e9 # disable vae slicing pipe.disable_vae_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) inputs["prompt"] = [inputs["prompt"]] * 4 inputs["latents"] = torch.cat([inputs["latents"]] * 4) image = pipe(**inputs).images # make sure that more than 4 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 4e9 # There is a small discrepancy at the image borders vs. a fully batched version. max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_vae_tiling(self): torch.cuda.reset_peak_memory_stats() model_id = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionPipeline.from_pretrained( model_id, revision="fp16", torch_dtype=torch.float16, safety_checker=None ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.unet = pipe.unet.to(memory_format=torch.channels_last) pipe.vae = pipe.vae.to(memory_format=torch.channels_last) prompt = "a photograph of an astronaut riding a horse" # enable vae tiling pipe.enable_vae_tiling() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output_chunked = pipe( [prompt], width=1024, height=1024, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ) image_chunked = output_chunked.images mem_bytes = torch.cuda.max_memory_allocated() # disable vae tiling pipe.disable_vae_tiling() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe( [prompt], width=1024, height=1024, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ) image = output.images assert mem_bytes < 1e10 max_diff = numpy_cosine_similarity_distance(image_chunked.flatten(), image.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_fp16_vs_autocast(self): # this test makes sure that the original model with autocast # and the new model with fp16 yield the same result pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) image_fp16 = pipe(**inputs).images with torch.autocast(torch_device): inputs = self.get_inputs(torch_device) image_autocast = pipe(**inputs).images # Make sure results are close enough diff = np.abs(image_fp16.flatten() - image_autocast.flatten()) # They ARE different since ops are not run always at the same precision # however, they should be extremely close. assert diff.mean() < 2e-2 def test_stable_diffusion_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.5693, -0.3018, -0.9746, 0.0518, -0.8770, 0.7559, -1.7402, 0.1022, 1.1582] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.1958, -0.2993, -1.0166, -0.5005, -0.4810, 0.6162, -0.9492, 0.6621, 1.4492] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] def test_stable_diffusion_low_cpu_mem_usage(self): pipeline_id = "CompVis/stable-diffusion-v1-4" start_time = time.time() pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline_low_cpu_mem_usage.to(torch_device) low_cpu_mem_usage_time = time.time() - start_time start_time = time.time() _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) normal_load_time = time.time() - start_time assert 2 * low_cpu_mem_usage_time < normal_load_time def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9 def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) outputs_offloaded = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() images = outputs.images offloaded_images = outputs_offloaded.images max_diff = numpy_cosine_similarity_distance(images.flatten(), offloaded_images.flatten()) assert max_diff < 1e-3 assert mem_bytes_offloaded < mem_bytes assert mem_bytes_offloaded < 3.5 * 10**9 for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") # With attention slicing torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_attention_slicing() _ = pipe(**inputs) mem_bytes_slicing = torch.cuda.max_memory_allocated() assert mem_bytes_slicing < mem_bytes_offloaded assert mem_bytes_slicing < 3 * 10**9 def test_stable_diffusion_textual_inversion(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_model_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_model_cpu_offload() pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_sequential_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_sequential_cpu_offload() pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=inputs) def test_stable_diffusion_lcm(self): unet = UNet2DConditionModel.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", subfolder="unet") sd_pipe = StableDiffusionPipeline.from_pretrained("Lykon/dreamshaper-7", unet=unet).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 6 inputs["output_type"] = "pil" image = sd_pipe(**inputs).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2 @slow @require_torch_gpu class StableDiffusionPipelineCkptTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_download_from_hub(self): ckpt_paths = [ "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt", "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix_base.ckpt", ] for ckpt_path in ckpt_paths: pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.ckpt") pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt" pipe = StableDiffusionPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-3 @nightly @require_torch_gpu class StableDiffusionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_1_4_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_5_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 3e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipelineLegacy, UNet2DConditionModel, UNet2DModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, preprocess_image, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_cond_unet_inpaint(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_extractor(self): def extract(*args, **kwargs): class Out: def __init__(self): self.pixel_values = torch.ones([0]) def to(self, device): self.pixel_values.to(device) return self return Out() return extract def test_stable_diffusion_inpaint_legacy(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ) image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4941, 0.5396, 0.4689, 0.6338, 0.5392, 0.4094, 0.5477, 0.5904, 0.5165]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_batched(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") init_images_tens = preprocess_image(init_image, batch_size=2) init_masks_tens = init_images_tens + 4 # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) images = sd_pipe( [prompt] * 2, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_images_tens, mask_image=init_masks_tens, ).images assert images.shape == (2, 32, 32, 3) image_slice_0 = images[0, -3:, -3:, -1].flatten() image_slice_1 = images[1, -3:, -3:, -1].flatten() expected_slice_0 = np.array([0.4697, 0.3770, 0.4096, 0.4653, 0.4497, 0.4183, 0.3950, 0.4668, 0.4672]) expected_slice_1 = np.array([0.4105, 0.4987, 0.5771, 0.4921, 0.4237, 0.5684, 0.5496, 0.4645, 0.5272]) assert np.abs(expected_slice_0 - image_slice_0).max() < 1e-2 assert np.abs(expected_slice_1 - image_slice_1).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" negative_prompt = "french fries" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( prompt, negative_prompt=negative_prompt, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4941, 0.5396, 0.4689, 0.6338, 0.5392, 0.4094, 0.5477, 0.5904, 0.5165]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_num_images_per_prompt(self): device = "cpu" unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" # test num_images_per_prompt=1 (default) images = sd_pipe( prompt, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ).images assert images.shape == (1, 32, 32, 3) # test num_images_per_prompt=1 (default) for batch of prompts batch_size = 2 images = sd_pipe( [prompt] * batch_size, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ).images assert images.shape == (batch_size, 32, 32, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 images = sd_pipe( prompt, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, num_images_per_prompt=num_images_per_prompt, ).images assert images.shape == (num_images_per_prompt, 32, 32, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 images = sd_pipe( [prompt] * batch_size, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, num_images_per_prompt=num_images_per_prompt, ).images assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3) @slow @require_torch_gpu class StableDiffusionInpaintLegacyPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, generator_device="cpu", seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "A red cat sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_legacy_pndm(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5665, 0.6117, 0.6430, 0.4057, 0.4594, 0.5658, 0.1596, 0.3106, 0.4305]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_batched(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = preprocess_image(inputs["image"], batch_size=2) mask = inputs["mask_image"].convert("L") mask = np.array(mask).astype(np.float32) / 255.0 mask = torch.from_numpy(1 - mask) masks = torch.vstack([mask[None][None]] * 2) inputs["mask_image"] = masks image = pipe(**inputs).images assert image.shape == (2, 512, 512, 3) image_slice_0 = image[0, 253:256, 253:256, -1].flatten() image_slice_1 = image[1, 253:256, 253:256, -1].flatten() expected_slice_0 = np.array( [0.52093095, 0.4176447, 0.32752383, 0.6175223, 0.50563973, 0.36470804, 0.65460044, 0.5775188, 0.44332123] ) expected_slice_1 = np.array( [0.3592432, 0.4233033, 0.3914635, 0.31014425, 0.3702293, 0.39412856, 0.17526966, 0.2642669, 0.37480092] ) assert np.abs(expected_slice_0 - image_slice_0).max() < 3e-3 assert np.abs(expected_slice_1 - image_slice_1).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_k_lms(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4534, 0.4467, 0.4329, 0.4329, 0.4339, 0.4220, 0.4244, 0.4332, 0.4426]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.5977, 1.5449, 1.0586, -0.3250, 0.7383, -0.0862, 0.4631, -0.2571, -1.1289]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.5190, 1.1621, 0.6885, 0.2424, 0.3337, -0.1617, 0.6914, -0.1957, -0.5474]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 callback_fn.has_been_called = False pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 @nightly @require_torch_gpu class StableDiffusionInpaintLegacyPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "A red cat sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 50, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm2/test_audioldm2.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, GPT2Config, GPT2Model, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, T5Tokenizer, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDM2Pipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = AudioLDM2UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=([None, 16, 32], [None, 16, 32]), ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=16, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, projection_dim=16, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=16 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) text_encoder_2_config = T5Config( vocab_size=32100, d_model=32, d_ff=37, d_kv=8, num_heads=2, num_layers=2, ) text_encoder_2 = T5EncoderModel(text_encoder_2_config) tokenizer_2 = T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) torch.manual_seed(0) language_model_config = GPT2Config( n_embd=16, n_head=2, n_layer=2, vocab_size=1000, n_ctx=99, n_positions=99, ) language_model = GPT2Model(language_model_config) language_model.config.max_new_tokens = 8 torch.manual_seed(0) projection_model = AudioLDM2ProjectionModel(text_encoder_dim=16, text_encoder_1_dim=32, langauge_model_dim=16) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "feature_extractor": feature_extractor, "language_model": language_model, "projection_model": projection_model, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm2_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) inputs["prompt_embeds"] = t5_prompt_embeds inputs["generated_prompt_embeds"] = generated_prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] generated_embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True if len(embeds) == 0 else embeds[0].shape[1], truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) embeds.append(t5_prompt_embeds) generated_embeds.append(generated_prompt_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm2_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm2_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) @unittest.skip("Raises a not implemented error in AudioLDM2") def test_xformers_attention_forwardGenerator_pass(self): pass def test_dict_tuple_outputs_equivalent(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_dict_tuple_outputs_equivalent(expected_max_difference=2e-4) def test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model self._test_inference_batch_single_identical(expected_max_diff=2e-4) def test_save_load_local(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_local(expected_max_difference=2e-4) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_optional_components(expected_max_difference=2e-4) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) def test_sequential_cpu_offload_forward_pass(self): pass @nightly class AudioLDM2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm2(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[17275:17285] expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_lms(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[31390:31400] expected_slice = np.array( [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_large(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder from diffusers.utils.testing_utils import ( enable_full_determinism, nightly, require_note_seq, require_onnxruntime, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() MIDI_FILE = "./tests/fixtures/elise_format0.mid" # The note-seq package throws an error on import because the default installed version of Ipython # is not compatible with python 3.8 which we run in the CI. # https://github.com/huggingface/diffusers/actions/runs/4830121056/jobs/8605954838#step:7:98 @unittest.skip("The note-seq package currently throws an error on import") class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = SpectrogramDiffusionPipeline required_optional_params = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } test_attention_slicing = False batch_params = TOKENS_TO_AUDIO_GENERATION_PARAMS params = TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) notes_encoder = SpectrogramNotesEncoder( max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1, num_layers=1, num_heads=1, d_kv=4, d_ff=2048, feed_forward_proj="gated-gelu", ) continuous_encoder = SpectrogramContEncoder( input_dims=128, targets_context_length=256, d_model=768, dropout_rate=0.1, num_layers=1, num_heads=1, d_kv=4, d_ff=2048, feed_forward_proj="gated-gelu", ) decoder = T5FilmDecoder( input_dims=128, targets_length=256, max_decoder_noise_time=20000.0, d_model=768, num_layers=1, num_heads=1, d_kv=4, d_ff=2048, dropout_rate=0.1, ) scheduler = DDPMScheduler() components = { "notes_encoder": notes_encoder.eval(), "continuous_encoder": continuous_encoder.eval(), "decoder": decoder.eval(), "scheduler": scheduler, "melgan": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "input_tokens": [ [1134, 90, 1135, 1133, 1080, 112, 1132, 1080, 1133, 1079, 133, 1132, 1079, 1133, 1] + [0] * 2033 ], "generator": generator, "num_inference_steps": 4, "output_type": "mel", } return inputs def test_spectrogram_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = SpectrogramDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) mel = output.audios mel_slice = mel[0, -3:, -3:] assert mel_slice.shape == (3, 3) expected_slice = np.array( [-11.512925, -4.788215, -0.46172905, -2.051715, -10.539147, -10.970963, -9.091634, 4.0, 4.0] ) assert np.abs(mel_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): pass def test_inference_batch_consistent(self): pass @skip_mps def test_progress_bar(self): return super().test_progress_bar() @nightly @require_torch_gpu @require_onnxruntime @require_note_seq class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_callback(self): # TODO - test that pipeline can decode tokens in a callback # so that music can be played live device = torch_device pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") melgan = pipe.melgan pipe.melgan = None pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) def callback(step, mel_output): # decode mel to audio audio = melgan(input_features=mel_output.astype(np.float32))[0] assert len(audio[0]) == 81920 * (step + 1) # simulate that audio is played return audio processor = MidiProcessor() input_tokens = processor(MIDI_FILE) input_tokens = input_tokens[:3] generator = torch.manual_seed(0) pipe(input_tokens, num_inference_steps=5, generator=generator, callback=callback, output_type="mel") def test_spectrogram_fast(self): device = torch_device pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) processor = MidiProcessor() input_tokens = processor(MIDI_FILE) # just run two denoising loops input_tokens = input_tokens[:2] generator = torch.manual_seed(0) output = pipe(input_tokens, num_inference_steps=2, generator=generator) audio = output.audios[0] assert abs(np.abs(audio).sum() - 3612.841) < 1e-1 def test_spectrogram(self): device = torch_device pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) processor = MidiProcessor() input_tokens = processor(MIDI_FILE) # just run 4 denoising loops input_tokens = input_tokens[:4] generator = torch.manual_seed(0) output = pipe(input_tokens, num_inference_steps=100, generator=generator) audio = output.audios[0] assert abs(np.abs(audio).sum() - 9389.1111) < 5e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/musicldm/test_musicldm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, MusicLDMPipeline, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = MusicLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=32 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": feature_extractor, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_musicldm_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = musicldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0020, -0.0035, -0.0019, -0.0037, -0.0020, -0.0038, -0.0019] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = musicldm_pipe.tokenizer( prompt, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = musicldm_pipe.text_encoder.get_text_features(text_inputs) inputs["prompt_embeds"] = prompt_embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = musicldm_pipe.tokenizer( p, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) text_embeds = musicldm_pipe.text_encoder.get_text_features( text_inputs, ) embeds.append(text_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = musicldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0019, -0.0035, -0.0018, -0.0037, -0.0021, -0.0038, -0.0018] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = musicldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = musicldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = musicldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = musicldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_musicldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = musicldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = musicldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = musicldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_musicldm_vocoder_model_in_dim(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = musicldm_pipe.vocoder.config config.model_in_dim *= 2 musicldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) @nightly @require_torch_gpu class MusicLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_musicldm(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8680:8690] expected_slice = np.array( [-0.1042, -0.1068, -0.1235, -0.1387, -0.1428, -0.136, -0.1213, -0.1097, -0.0967, -0.0945] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_musicldm_lms(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe.scheduler = LMSDiscreteScheduler.from_config(musicldm_pipe.scheduler.config) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[58020:58030] expected_slice = np.array([0.3592, 0.3477, 0.4084, 0.4665, 0.5048, 0.5891, 0.6461, 0.5579, 0.4595, 0.4403]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from parameterized import parameterized from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, LCMScheduler, MultiAdapter, StableDiffusionXLAdapterPipeline, T2IAdapter, UNet2DConditionModel, ) from diffusers.utils import load_image, logging from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableDiffusionXLAdapterPipelineFastTests( PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLAdapterPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS def get_dummy_components(self, adapter_type="full_adapter_xl", time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") if adapter_type == "full_adapter_xl": adapter = T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, adapter_type="full_adapter_xl", ), T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, adapter_type="full_adapter_xl", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, # "safety_checker": None, # "feature_extractor": None, } return components def get_dummy_components_with_full_downscaling(self, adapter_type="full_adapter_xl"): """Get dummy components with x8 VAE downscaling and 3 UNet down blocks. These dummy components are intended to fully-exercise the T2I-Adapter downscaling behavior. """ torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=2, use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=1, projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") if adapter_type == "full_adapter_xl": adapter = T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type="full_adapter_xl", ), T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type="full_adapter_xl", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, # "safety_checker": None, # "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): if num_images == 1: image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) else: image = [ floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images) ] if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "numpy", } return inputs def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5752919, 0.6022097, 0.4728038, 0.49861962, 0.57084894, 0.4644975, 0.5193715, 0.5133664, 0.4729858] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 @parameterized.expand( [ # (dim=144) The internal feature map will be 9x9 after initial pixel unshuffling (downscaled x16). (((4 * 2 + 1) * 16),), # (dim=160) The internal feature map will be 5x5 after the first T2I down block (downscaled x32). (((4 * 1 + 1) * 32),), ] ) def test_multiple_image_dimensions(self, dim): """Test that the T2I-Adapter pipeline supports any input dimension that is divisible by the adapter's `downscale_factor`. This test was added in response to an issue where the T2I Adapter's downscaling padding behavior did not match the UNet's behavior. Note that we have selected `dim` values to produce odd resolutions at each downscaling level. """ components = self.get_dummy_components_with_full_downscaling() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) image = sd_pipe(**inputs).images assert image.shape == (1, dim, dim, 3) @parameterized.expand(["full_adapter", "full_adapter_xl", "light_adapter"]) def test_total_downscale_factor(self, adapter_type): """Test that the T2IAdapter correctly reports its total_downscale_factor.""" batch_size = 1 in_channels = 3 out_channels = [320, 640, 1280, 1280] in_image_size = 512 adapter = T2IAdapter( in_channels=in_channels, channels=out_channels, num_res_blocks=2, downscale_factor=8, adapter_type=adapter_type, ) adapter.to(torch_device) in_image = floats_tensor((batch_size, in_channels, in_image_size, in_image_size)).to(torch_device) adapter_state = adapter(in_image) # Assume that the last element in `adapter_state` has been downsampled the most, and check # that it matches the `total_downscale_factor`. expected_out_image_size = in_image_size // adapter.total_downscale_factor assert adapter_state[-1].shape == ( batch_size, out_channels[-1], expected_out_image_size, expected_out_image_size, ) def test_save_load_optional_components(self): return self._test_save_load_optional_components() def test_adapter_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_sdxl_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionXLMultiAdapterPipelineFastTests( StableDiffusionXLAdapterPipelineFastTests, PipelineTesterMixin, unittest.TestCase ): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("multi_adapter") def get_dummy_inputs(self, device, seed=0, height=64, width=64): inputs = super().get_dummy_inputs(device, seed, height, width, num_images=2) inputs["adapter_conditioning_scale"] = [0.5, 0.5] return inputs def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5813032, 0.60995954, 0.47563356, 0.5056669, 0.57199144, 0.4631841, 0.5176794, 0.51252556, 0.47183886] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_inference_batch_consistent( self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs for batch_size in batch_sizes: batched_inputs = {} for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] batched_inputs["output_type"] = "np" output = pipe(**batched_inputs) assert len(output[0]) == batch_size batched_inputs["output_type"] = "np" output = pipe(**batched_inputs)[0] assert output.shape[0] == batch_size logger.setLevel(level=diffusers.logging.WARNING) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: if key == "image": batched_images = [] for image in inputs[key]: batched_images.append(batch_size * [image]) inputs[key] = batched_images else: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_inference_batch_single_identical( self, batch_size=3, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False, expected_max_diff=2e-3, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): if test_max_difference is None: # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems # make sure that batched and non-batched is identical test_max_difference = torch_device != "mps" if test_mean_pixel_difference is None: # TODO same as above test_mean_pixel_difference = torch_device != "mps" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batch_size = batch_size for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size elif name == "generator": batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)] else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size inputs["generator"] = self.get_generator(0) output = pipe(**inputs) logger.setLevel(level=diffusers.logging.WARNING) if test_max_difference: if relax_max_difference: # Taking the median of the largest <n> differences # is resilient to outliers diff = np.abs(output_batch[0][0] - output[0][0]) diff = diff.flatten() diff.sort() max_diff = np.median(diff[-5:]) else: max_diff = np.abs(output_batch[0][0] - output[0][0]).max() assert max_diff < expected_max_diff if test_mean_pixel_difference: assert_mean_pixel_difference(output_batch[0][0], output[0][0]) def test_adapter_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()] print(",".join(debug)) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_sdxl_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()] print(",".join(debug)) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class AdapterSDXLPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny_lora(self): adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16).to( "cpu" ) pipe = StableDiffusionXLAdapterPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16, variant="fp16", ) pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors") pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array( [0.50346327, 0.50708383, 0.50719553, 0.5135172, 0.5155377, 0.5066059, 0.49680984, 0.5005894, 0.48509413] ) assert numpy_cosine_similarity_distance(original_image, expected_image) < 1e-4
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, AutoencoderTiny, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, require_torch_gpu, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin enable_full_determinism() class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": image_encoder, "feature_extractor": feature_extractor, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4664, 0.4886, 0.4403, 0.6902, 0.5592, 0.4534, 0.5931, 0.5951, 0.5224]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.0, 0.0, 0.0106, 0.0, 0.0, 0.0087, 0.0052, 0.0062, 0.0177]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) class StableDiffusionXLImg2ImgRefinerOnlyPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "tokenizer": None, "text_encoder": None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": None, "feature_extractor": None, } return components def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4745, 0.4924, 0.4338, 0.6468, 0.5547, 0.4419, 0.5646, 0.5897, 0.5146]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components()
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py
# coding=utf-8 # Copyright 2023 Harutatsu Akiyama and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import ( StableDiffusionXLInstructPix2PixPipeline, ) from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLInstructPix2PixPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLInstructPix2PixPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 5 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def test_components_function(self): init_components = self.get_dummy_components() pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=2e-3) # Overwrite the default test_latents_inputs because pix2pix encode the image differently def test_latents_input(self): components = self.get_dummy_components() pipe = StableDiffusionXLInstructPix2PixPipeline(**components) pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] vae = components["vae"] inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") for image_param in self.image_latents_params: if image_param in inputs.keys(): inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode() out_latents_inputs = pipe(**inputs)[0] max_diff = np.abs(out - out_latents_inputs).max() self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") def test_cfg(self): pass def test_save_load_optional_components(self): self._test_save_load_optional_components()
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin enable_full_determinism() class StableDiffusionXLPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, norm_num_groups=1, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_stable_diffusion_xl_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5552, 0.5569, 0.4725, 0.4348, 0.4994, 0.4632, 0.5142, 0.5012, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 700]: for scheduler_cls_timesteps in [ (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (DPMSolverMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) steps = 25 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ ( DDIMScheduler, [ 961, 921, 881, 841, 801, 761, 721, 681, 641, 601, 561, 521, 481, 441, 401, 361, 321, 281, 241, 201, 161, 121, 81, 41, 1, ], ), ( EulerDiscreteScheduler, [ 961.0, 921.0, 881.0, 841.0, 801.0, 761.0, 721.0, 681.0, 641.0, 601.0, 561.0, 521.0, 481.0, 441.0, 401.0, 361.0, 321.0, 281.0, 241.0, 201.0, 161.0, 121.0, 81.0, 41.0, 1.0, ], ), ( DPMSolverMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( UniPCMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( HeunDiscreteScheduler, [ 961.0, 921.0, 921.0, 881.0, 881.0, 841.0, 841.0, 801.0, 801.0, 761.0, 761.0, 721.0, 721.0, 681.0, 681.0, 641.0, 641.0, 601.0, 601.0, 561.0, 561.0, 521.0, 521.0, 481.0, 481.0, 441.0, 441.0, 401.0, 401.0, 361.0, 361.0, 321.0, 321.0, 281.0, 281.0, 241.0, 241.0, 201.0, 201.0, 161.0, 161.0, 121.0, 121.0, 81.0, 81.0, 41.0, 41.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" with self.assertRaises(ValueError) as cm: inputs_2 = { **inputs, **{ "denoising_start": split_2, "denoising_end": split_1, "image": latents, "output_type": "latent", }, } pipe_2(**inputs_2).images[0] assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception) inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_cond = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_with_no_neg_cond - image_slice_with_neg_cond).max() > 1e-2) def test_stable_diffusion_xl_save_from_pretrained(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 @slow class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): def test_stable_diffusion_lcm(self): torch.manual_seed(0) unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-ssd-1b", torch_dtype=torch.float16, variant="fp16" ) sd_pipe = StableDiffusionXLPipeline.from_pretrained( "segmind/SSD-1B", unet=unet, torch_dtype=torch.float16, variant="fp16" ).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) prompt = "a red car standing on the side of the street" image = sd_pipe(prompt, num_inference_steps=4, guidance_scale=8.0).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_ssd_1b_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLInpaintPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionXLInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( { "add_text_embeds", "add_time_ids", "mask", "masked_image_latents", } ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "requires_aesthetics_score": True, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, :] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "strength": 1.0, "output_type": "np", } return inputs def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def test_stable_diffusion_xl_inpaint_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8029, 0.5523, 0.5825, 0.6003, 0.6702, 0.7018, 0.6369, 0.5955, 0.5123]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_refiner(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(skip_first_text_encoder=True) sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7045, 0.4838, 0.5454, 0.6270, 0.6168, 0.6717, 0.6484, 0.5681, 0.4922]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [7, 20]: assert_run_mixture(steps, 0.33, EulerDiscreteScheduler) assert_run_mixture(steps, 0.33, HeunDiscreteScheduler) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [5, 8, 20]: for split in [0.33, 0.49, 0.71]: for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split, scheduler_cls) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = sd_pipe._encode_vae_image(image, generator=generator) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = sd_pipe._encode_vae_image(masked_image, generator=generator) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_stable_diffusion_xl_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device torch.backends.cuda.matmul.allow_tf32 = False class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase): pass @nightly @require_torch_gpu class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_remove_unused_weights_save_load(self): pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion") # remove text_unet pipe.remove_unused_weights() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger " generator = torch.manual_seed(0) image = pipe( prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = generator.manual_seed(0) new_image = pipe( prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy" ).images assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" def test_inference_text2img(self): pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger " generator = torch.manual_seed(0) image = pipe( prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device torch.backends.cuda.matmul.allow_tf32 = False class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): pass @nightly @require_torch_gpu class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase): def test_inference_image_variations(self): pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) image_prompt = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) generator = torch.manual_seed(0) image = pipe( image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionDualGuidedPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device torch.backends.cuda.matmul.allow_tf32 = False @nightly @require_torch_gpu class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_remove_unused_weights_save_load(self): pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") # remove text_unet pipe.remove_unused_weights() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) second_prompt = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) generator = torch.manual_seed(0) image = pipe( prompt="first prompt", image=second_prompt, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(tmpdirname) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = generator.manual_seed(0) new_image = pipe( prompt="first prompt", image=second_prompt, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" def test_inference_dual_guided(self): pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") pipe.remove_unused_weights() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) first_prompt = "cyberpunk 2077" second_prompt = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) generator = torch.manual_seed(0) image = pipe( prompt=first_prompt, image=second_prompt, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0787, 0.0849, 0.0826, 0.0812, 0.0807, 0.0795, 0.0818, 0.0798, 0.0779]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device torch.backends.cuda.matmul.allow_tf32 = False class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase): pass @nightly @require_torch_gpu class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_from_save_pretrained(self): pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) generator = torch.manual_seed(0) image = pipe.dual_guided( prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = generator.manual_seed(0) new_image = pipe.dual_guided( prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" def test_inference_dual_guided_then_text_to_image(self): pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "cyberpunk 2077" init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) generator = torch.manual_seed(0) image = pipe.dual_guided( prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 prompt = "A painting of a squirrel eating a burger " generator = torch.manual_seed(0) image = pipe.text_to_image( prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
0