hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise augmented image embeddings.
            # I.e. the image embeddings concated with the noised embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU undeterminism requires a looser check.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    # Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP undeterminism requires a looser check.
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)


@nightly
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
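For orientation, a minimal sketch of driving the pipeline this file tests end to end. It reuses the checkpoint and memory-saving calls from the integration test above; the prompt and step counts are illustrative assumptions, not values taken from the tests.

import torch

from diffusers import StableUnCLIPPipeline

# Checkpoint and offloading calls mirror the integration test above.
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()       # memory saving, as in the test
pipe.enable_sequential_cpu_offload()  # submodules move to GPU only while they run

generator = torch.Generator(device="cpu").manual_seed(0)
# Step counts are illustrative; the pipeline runs a prior stage, then the decoder.
image = pipe(
    "anime turtle",
    prior_num_inference_steps=25,
    num_inference_steps=50,
    generator=generator,
).images[0]
image.save("turtle.png")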
hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    skip_mps,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise augmented image embeddings.
            # I.e. the image embeddings concated with the noised embeddings of the same dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because GPU undeterminism requires a looser check.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    # Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because undeterminism requires a looser check.
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@nightly
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
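A minimal image-variation sketch of the pipeline tested above, built from the same checkpoint, fixture image, and memory-saving calls the integration tests use; step counts and the save path are illustrative assumptions.

import torch

from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

# Same "l" checkpoint as the integration test above.
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.enable_attention_slicing()  # memory saving, as in the tests

# The turtle fixture the integration tests condition on.
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)

generator = torch.Generator(device="cpu").manual_seed(0)
# Positional order (image, prompt) follows the test invocations above.
image = pipe(init_image, "anime turtle", generator=generator).images[0]
image.save("turtle_variation.png")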
hf_public_repos/diffusers/tests/pipelines/dit/test_dit.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@nightly
@require_torch_gpu
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
hf_public_repos/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py
import gc
import random
import traceback
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
    GPT2Tokenizer,
)

from diffusers import (
    AutoencoderKL,
    DPMSolverMultistepScheduler,
    UniDiffuserModel,
    UniDiffuserPipeline,
    UniDiffuserTextDecoder,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    nightly,
    require_torch_2,
    require_torch_gpu,
    run_test_in_subprocess,
    torch_device,
)
from diffusers.utils.torch_utils import randn_tensor

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Will be run via run_test_in_subprocess
def _test_unidiffuser_compile(in_queue, out_queue, timeout):
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        torch_device = inputs.pop("torch_device")
        seed = inputs.pop("seed")
        inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)

        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
        # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to(torch_device)
        pipe.unet.to(memory_format=torch.channels_last)
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        pipe.set_progress_bar_config(disable=None)

        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class UniDiffuserPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = UniDiffuserPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    # vae_latents, not latents, is the argument that corresponds to VAE latent inputs
    image_latents_params = frozenset(["vae_latents"])

    def get_dummy_components(self):
        unet = UniDiffuserModel.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="unet",
        )

        scheduler = DPMSolverMultistepScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            solver_order=3,
        )

        vae = AutoencoderKL.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="vae",
        )

        text_encoder = CLIPTextModel.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="text_encoder",
        )
        clip_tokenizer = CLIPTokenizer.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="clip_tokenizer",
        )

        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="image_encoder",
        )
        # From the Stable Diffusion Image Variation pipeline tests
        clip_image_processor = CLIPImageProcessor(crop_size=32, size=32)
        # image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip")

        text_tokenizer = GPT2Tokenizer.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="text_tokenizer",
        )
        text_decoder = UniDiffuserTextDecoder.from_pretrained(
            "hf-internal-testing/unidiffuser-diffusers-test",
            subfolder="text_decoder",
        )

        components = {
            "vae": vae,
            "text_encoder": text_encoder,
            "image_encoder": image_encoder,
            "clip_image_processor": clip_image_processor,
            "clip_tokenizer": clip_tokenizer,
            "text_decoder": text_decoder,
            "text_tokenizer": text_tokenizer,
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "an elephant under the sea",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_fixed_latents(self, device, seed=0):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        # Hardcode the shapes for now.
        prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32)
        vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32)
        clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32)
        latents = {
            "prompt_latents": prompt_latents,
            "vae_latents": vae_latents,
            "clip_latents": clip_latents,
        }
        return latents

    def get_dummy_inputs_with_latents(self, device, seed=0):
        # image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        # image = image.cpu().permute(0, 2, 3, 1)[0]
        # image = Image.fromarray(np.uint8(image)).convert("RGB")
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg",
        )
        image = image.resize((32, 32))
        latents = self.get_fixed_latents(device, seed=seed)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "an elephant under the sea",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "prompt_latents": latents.get("prompt_latents"),
            "vae_latents": latents.get("vae_latents"),
            "clip_latents": latents.get("clip_latents"),
        }
        return inputs

    def test_unidiffuser_default_joint_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_joint_no_cfg_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        # Set guidance scale to 1.0 to turn off CFG
        inputs["guidance_scale"] = 1.0
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_text2img_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_unidiffuser_default_image_0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img'
        unidiffuser_pipe.set_image_mode()
        assert unidiffuser_pipe.mode == "img"

        inputs = self.get_dummy_inputs(device)
        # Delete prompt and image for unconditional ("marginal") text generation.
        del inputs["prompt"]
        del inputs["image"]
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_unidiffuser_default_text_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img'
        unidiffuser_pipe.set_text_mode()
        assert unidiffuser_pipe.mode == "text"

        inputs = self.get_dummy_inputs(device)
        # Delete prompt and image for unconditional ("marginal") text generation.
        del inputs["prompt"]
        del inputs["image"]
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_img2text_v0(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_joint_v1(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        inputs["data_type"] = 1
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_default_text2img_v1(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_unidiffuser_default_img2text_v1(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = " no no no "
        assert text[0][:10] == expected_text_prefix

    def test_unidiffuser_text2img_multiple_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (2, 32, 32, 3)

    def test_unidiffuser_img2text_multiple_prompts(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        text = unidiffuser_pipe(**inputs).text

        assert len(text) == 3

    def test_unidiffuser_text2img_multiple_images_with_latents(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete image for text-conditioned image generation
        del inputs["image"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        image = unidiffuser_pipe(**inputs).images
        assert image.shape == (2, 32, 32, 3)

    def test_unidiffuser_img2text_multiple_prompts_with_latents(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        unidiffuser_pipe = UniDiffuserPipeline(**components)
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete text for image-conditioned text generation
        del inputs["prompt"]
        inputs["num_images_per_prompt"] = 2
        inputs["num_prompts_per_image"] = 3
        text = unidiffuser_pipe(**inputs).text

        assert len(text) == 3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=2e-4)

    @require_torch_gpu
    def test_unidiffuser_default_joint_v1_cuda_fp16(self):
        device = "cuda"
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
            "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
        )
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'joint'
        unidiffuser_pipe.set_joint_mode()
        assert unidiffuser_pipe.mode == "joint"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        inputs["data_type"] = 1
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

        expected_text_prefix = '" This This'
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix

    @require_torch_gpu
    def test_unidiffuser_default_text2img_v1_cuda_fp16(self):
        device = "cuda"
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
            "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
        )
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'text2img'
        unidiffuser_pipe.set_text_to_image_mode()
        assert unidiffuser_pipe.mode == "text2img"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["image"]
        inputs["data_type"] = 1
        sample = unidiffuser_pipe(**inputs)
        image = sample.images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3

    @require_torch_gpu
    def test_unidiffuser_default_img2text_v1_cuda_fp16(self):
        device = "cuda"
        unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
            "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
        )
        unidiffuser_pipe = unidiffuser_pipe.to(device)
        unidiffuser_pipe.set_progress_bar_config(disable=None)

        # Set mode to 'img2text'
        unidiffuser_pipe.set_image_to_text_mode()
        assert unidiffuser_pipe.mode == "img2text"

        inputs = self.get_dummy_inputs_with_latents(device)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        inputs["data_type"] = 1
        text = unidiffuser_pipe(**inputs).text

        expected_text_prefix = '" This This'
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix


@nightly
@require_torch_gpu
class UniDiffuserPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, seed=0, generate_latents=False):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
        )
        inputs = {
            "prompt": "an elephant under the sea",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 8.0,
            "output_type": "numpy",
        }
        if generate_latents:
            latents = self.get_fixed_latents(device, seed=seed)
            for latent_name, latent_tensor in latents.items():
                inputs[latent_name] = latent_tensor
        return inputs

    def get_fixed_latents(self, device, seed=0):
        if isinstance(device, str):
            device = torch.device(device)
        latent_device = torch.device("cpu")
        generator = torch.Generator(device=latent_device).manual_seed(seed)
        # Hardcode the shapes for now.
        prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
        vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
        clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)

        # Move latents onto desired device.
        prompt_latents = prompt_latents.to(device)
        vae_latents = vae_latents.to(device)
        clip_latents = clip_latents.to(device)

        latents = {
            "prompt_latents": prompt_latents,
            "vae_latents": vae_latents,
            "clip_latents": clip_latents,
        }
        return latents

    def test_unidiffuser_default_joint_v1(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        sample = pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1

        expected_text_prefix = "a living room"
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix

    def test_unidiffuser_default_text2img_v1(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        del inputs["image"]
        sample = pipe(**inputs)
        image = sample.images
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_unidiffuser_default_img2text_v1(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        del inputs["prompt"]
        sample = pipe(**inputs)
        text = sample.text

        expected_text_prefix = "An astronaut"
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix

    @unittest.skip(reason="Skip torch.compile test to speed up the slow test suite.")
    @require_torch_2
    def test_unidiffuser_compile(self, seed=0):
        inputs = self.get_inputs(torch_device, seed=seed, generate_latents=True)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        # Can't pickle a Generator object
        del inputs["generator"]
        inputs["torch_device"] = torch_device
        inputs["seed"] = seed
        run_test_in_subprocess(test_case=self, target_func=_test_unidiffuser_compile, inputs=inputs)


@nightly
@require_torch_gpu
class UniDiffuserPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, seed=0, generate_latents=False):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
        )
        inputs = {
            "prompt": "an elephant under the sea",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 8.0,
            "output_type": "numpy",
        }
        if generate_latents:
            latents = self.get_fixed_latents(device, seed=seed)
            for latent_name, latent_tensor in latents.items():
                inputs[latent_name] = latent_tensor
        return inputs

    def get_fixed_latents(self, device, seed=0):
        if isinstance(device, str):
            device = torch.device(device)
        latent_device = torch.device("cpu")
        generator = torch.Generator(device=latent_device).manual_seed(seed)
        # Hardcode the shapes for now.
        prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
        vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
        clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)

        # Move latents onto desired device.
        prompt_latents = prompt_latents.to(device)
        vae_latents = vae_latents.to(device)
        clip_latents = clip_latents.to(device)

        latents = {
            "prompt_latents": prompt_latents,
            "vae_latents": vae_latents,
            "clip_latents": clip_latents,
        }
        return latents

    def test_unidiffuser_default_joint_v1_fp16(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        # inputs = self.get_dummy_inputs(device)
        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        # Delete prompt and image for joint inference.
        del inputs["prompt"]
        del inputs["image"]
        sample = pipe(**inputs)
        image = sample.images
        text = sample.text
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
        assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1

        expected_text_prefix = "a living room"
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix

    def test_unidiffuser_default_text2img_v1_fp16(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        del inputs["image"]
        sample = pipe(**inputs)
        image = sample.images
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_unidiffuser_default_img2text_v1_fp16(self):
        pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(device=torch_device, generate_latents=True)
        del inputs["prompt"]
        sample = pipe(**inputs)
        text = sample.text

        expected_text_prefix = "An astronaut"
        assert text[0][: len(expected_text_prefix)] == expected_text_prefix
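A minimal multi-mode sketch of the UniDiffuser pipeline tested above. The explicit set_*_mode() calls, checkpoint, fixture image, and guidance scale all come from the tests; the step count and image size are illustrative assumptions.

import torch

from diffusers import UniDiffuserPipeline
from diffusers.utils import load_image

pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to("cuda")
pipe.enable_attention_slicing()  # memory saving, as in the slow tests

# text-to-image
pipe.set_text_to_image_mode()
image = pipe(prompt="an elephant under the sea", num_inference_steps=20, guidance_scale=8.0).images[0]

# image-to-text (captioning)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
).resize((512, 512))
pipe.set_image_to_text_mode()
caption = pipe(image=init_image, num_inference_steps=20).text[0]

# joint image-and-text generation: no prompt and no image
pipe.set_joint_mode()
sample = pipe(num_inference_steps=20, guidance_scale=8.0)
image, text = sample.images[0], sample.text[0]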
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-4)


@nightly
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
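A two-stage usage sketch mirroring the integration test above: the prior maps text to CLIP image embeddings, and the controlnet decoder maps embeddings plus a depth hint to pixels. Checkpoints, the hint fixture, and the tensor conversion come from the test; the step counts are illustrative assumptions.

import numpy as np
import torch

from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

# Stage 1: text -> image embeddings.
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
# Stage 2: embeddings + depth hint -> image.
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

# Depth hint as a (1, 3, H, W) float tensor in [0, 1], exactly as the test builds it.
hint = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinskyv22/hint_image_cat.png"
)
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)

generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
    "A robot, 4k photo", negative_prompt="", generator=generator, num_inference_steps=25
).to_tuple()

image = pipe(
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    hint=hint,
    generator=generator,
    num_inference_steps=50,
).images[0]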
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import ( KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22InpaintCombinedPipeline, ) from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies from .test_kandinsky_img2img import Dummies as Img2ImgDummies from .test_kandinsky_inpaint import Dummies as InpaintDummies from .test_kandinsky_prior import Dummies as PriorDummies enable_full_determinism() class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22CombinedPipeline params = [ "prompt", ] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = True callback_cfg_params = ["image_embeds"] def get_dummy_components(self): dummy = Dummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update( { "height": 64, "width": 64, } ) return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgCombinedPipeline params = ["prompt", "image"] batch_params = ["prompt", "negative_prompt", "image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds"] def get_dummy_components(self): dummy = Img2ImgDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = Img2ImgDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert
np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintCombinedPipeline params = ["prompt", "image", "mask_image"] batch_params = ["prompt", "negative_prompt", "image", "mask_image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = InpaintDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = InpaintDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_callback_inputs(self): pass def test_callback_cfg(self): pass
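The combined fast tests above all assemble their pipeline by merging the decoder dummies with the prior dummies under a prior_ prefix. A minimal sketch of that namespacing step follows, with plain dicts standing in for the real diffusers components (an illustrative assumption, not the library API):

# Minimal sketch of the `prior_`-prefix merge used by get_dummy_components.
# Plain dicts stand in for real pipeline components (assumption for brevity).
def merge_components(decoder_components: dict, prior_components: dict) -> dict:
    merged = dict(decoder_components)
    # The prefix avoids key clashes: both sub-pipelines own a "scheduler".
    merged.update({f"prior_{k}": v for k, v in prior_components.items()})
    return merged

decoder = {"unet": "unet", "scheduler": "ddim", "movq": "movq"}
prior = {"prior": "prior", "scheduler": "unclip", "tokenizer": "clip"}
merged = merge_components(decoder, prior)
assert merged["scheduler"] == "ddim" and merged["prior_scheduler"] == "unclip"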
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) 
# create mask mask = np.zeros((64, 64), dtype=np.float32) mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintPipeline params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds", "masked_image", "mask_image"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_inpaint(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) # override default test because we need to zero out mask too in order to make sure final latent is all zero def test_callback_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs:
{missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_inpaint(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) mask[:250, 250:-250] = 1 prompt = "a hat" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22InpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
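The inpaint masks in this file are plain float arrays in which 1.0 marks the region to repaint and 0.0 keeps the original pixels. A small self-contained sketch reproducing the two mask shapes used above:

import numpy as np

# Fast test mask: top-left 32x32 quadrant of a 64x64 canvas.
mask_small = np.zeros((64, 64), dtype=np.float32)
mask_small[:32, :32] = 1

# Integration test mask: a 250-pixel-tall band across the top of a
# 768x768 image, leaving 250-pixel margins on the left and right.
mask_large = np.zeros((768, 768), dtype=np.float32)
mask_large[:250, 250:-250] = 1

print(mask_small.sum())  # 1024.0
print(mask_large.sum())  # 67000.0  (250 rows x 268 columns)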
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq ddim_config = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = 
Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgPipeline params = ["image_embeds", "negative_image_embeds", "image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", ] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) @slow @require_torch_gpu class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) prompt = "A red cartoon frog, 4k" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Img2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) image = output.images[0] assert image.shape 
== (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
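The dummy inputs in this file build their PIL init_image from a random tensor. A self-contained sketch of that conversion, with torch.rand standing in for diffusers' floats_tensor helper (an assumption made to keep the snippet dependency-light):

import numpy as np
import torch
from PIL import Image

# NCHW float tensor -> HWC array -> uint8 PIL image resized to 256x256,
# mirroring the init_image construction in get_dummy_inputs above.
image = torch.rand(1, 3, 64, 64, generator=torch.Generator().manual_seed(0))
image = image.cpu().permute(0, 2, 3, 1)[0]  # (64, 64, 3), values in [0, 1)
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
print(init_image.size)  # (256, 256)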
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": 
image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"] test_xformers_attention = False def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) # override default test because no output_type "latent", use "pt" instead def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["num_inference_steps"] = 2 inputs["output_type"] = "pt" output = pipe(**inputs)[0] assert output.abs().sum() == 0
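The overridden test_callback_inputs above relies on a simple invariant: zeroing the latents on the final step must force an all-zero output, which proves the tensors handed to the callback are really threaded back into the loop. A standalone sketch of that contract, with a bare Python loop standing in for the real denoising loop:

import torch

num_timesteps = 2  # matches num_inference_steps in the dummy inputs

def callback_inputs_test(pipe, i, t, callback_kwargs):
    # On the last step, replace the latents with zeros.
    if i == num_timesteps - 1:
        callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
    return callback_kwargs

latents = torch.randn(1, 32)
for i in range(num_timesteps):
    kwargs = callback_inputs_test(None, i, None, {"latents": latents})
    latents = kwargs["latents"]  # the loop must consume the callback's output
assert latents.abs().sum() == 0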
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22PriorEmb2EmbPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22ControlnetImg2ImgPipeline params = ["image_embeds", "negative_image_embeds", "image", "hint"] batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq ddim_config = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": 
False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create hint hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def test_kandinsky_controlnet_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1.75e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) @nightly @require_torch_gpu class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_controlnet_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) init_image = init_image.resize((512, 512)) hint = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) hint = torch.from_numpy(np.array(hint)).float() / 255.0 hint = hint.permute(2, 0, 1).unsqueeze(0) prompt = "A robot, 4k photo" pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ) pipeline = 
pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
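The integration test above turns a depth image into the hint tensor by scaling to [0, 1] and reshaping to NCHW. A sketch of the same preprocessing on a synthetic gradient image (the gradient is a stand-in for the real depth map):

import numpy as np
import torch
from PIL import Image

# Synthetic horizontal gradient standing in for the downloaded depth hint.
depth = Image.fromarray(np.tile(np.arange(64, dtype=np.uint8) * 4, (64, 1)))
# Scale to [0, 1] and reorder HWC -> NCHW, as in the integration test.
hint = torch.from_numpy(np.array(depth.convert("RGB"))).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)
print(hint.shape)  # torch.Size([1, 3, 64, 64])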
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorEmb2EmbPipeline params = ["prompt", "image"] batch_params = ["prompt", "image"] required_optional_params = [ "num_images_per_prompt", "strength", "generator", "num_inference_steps", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder 
text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) inputs = { "prompt": "horse", "image": init_image, "strength": 0.5, "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_kandinsky_prior_emb2emb(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [ 0.1071284, 1.3330271, 0.61260223, -0.6691065, -0.3846852, -1.0303661, 0.22716111, 0.03348901, 0.30040675, -0.24805029, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
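The comment on dummy_prior above deserves one extra sentence: the prior denormalizes its output roughly as latents * clip_std + clip_mean, so the zero-initialized clip_std of a freshly seeded dummy model would collapse every output to zero, which is why the tests patch it to ones. A minimal numeric sketch of that effect:

import torch

latents = torch.randn(1, 32)
clip_mean = torch.zeros(1, 32)
clip_std_zero = torch.zeros(1, 32)  # default init -> degenerate output
clip_std_ones = torch.ones(1, 32)   # what the dummy components patch in

assert (latents * clip_std_zero + clip_mean).abs().sum() == 0
assert torch.equal(latents * clip_std_ones + clip_mean, latents)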
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, 
"width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
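All of the test_kandinsky* fast tests in these files share one fingerprinting idiom: slice the bottom-right 3x3 corner of the last channel and compare it against pinned reference values. A standalone sketch of the idiom on random data (the "expected" values here are derived on the spot, purely for illustration):

import numpy as np

image = np.random.RandomState(0).rand(1, 64, 64, 3).astype(np.float32)

image_slice = image[0, -3:, -3:, -1]           # (3, 3) corner, last channel
expected_slice = image_slice.flatten().copy()  # stands in for pinned values

max_diff = np.abs(image_slice.flatten() - expected_slice).max()
assert max_diff < 1e-2, f"expected {expected_slice}, got {image_slice.flatten()}"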
hf_public_repos/diffusers/tests/pipelines/animatediff/test_animatediff.py
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import logging from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AnimateDiffPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in 
case it has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood.
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) pipe(**inputs) @slow @require_torch_gpu class AnimateDiffPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_animatediff(self): adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) pipe = pipe.to(torch_device) pipe.scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", steps_offset=1, clip_sample=False, ) pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain" negative_prompt = "bad quality, worse quality" generator = torch.Generator("cpu").manual_seed(0) output = pipe( prompt, negative_prompt=negative_prompt, num_frames=16, generator=generator, guidance_scale=7.5, num_inference_steps=3, output_type="np", ) image = output.frames[0] assert image.shape == (16, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array( [ 0.11357737, 0.11285847, 0.11180121, 0.11084166, 0.11414117, 0.09785956, 0.10742754, 0.10510018, 0.08045256, ] ) assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
0
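As a quick orientation for readers of the test file above, here is a minimal sketch of the same AnimateDiff setup outside the test harness. The checkpoints and scheduler config are the ones the slow test itself loads; the prompt is a placeholder, and `export_to_gif` is assumed to be available from `diffusers.utils` in this version of the library.

import torch

from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# Graft the motion adapter onto a Stable Diffusion checkpoint, as the slow test does.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter)
pipe.scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="linear", steps_offset=1, clip_sample=False
)
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

generator = torch.Generator("cpu").manual_seed(0)  # seeded for reproducibility, as in the tests
output = pipe(
    "a serene forest at night",  # placeholder prompt
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    num_inference_steps=25,
    guidance_scale=7.5,
    generator=generator,
)
export_to_gif(output.frames[0], "animation.gif")  # frames[0] is the frame list for the first prompt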
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies from .test_kandinsky_img2img import Dummies as Img2ImgDummies from .test_kandinsky_inpaint import Dummies as InpaintDummies from .test_kandinsky_prior import Dummies as PriorDummies enable_full_determinism() class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyCombinedPipeline params = [ "prompt", ] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = True def get_dummy_components(self): dummy = Dummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update( { "height": 64, "width": 64, } ) return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert 
np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyImg2ImgCombinedPipeline params = ["prompt", "image"] batch_params = ["prompt", "negative_prompt", "image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = Img2ImgDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = Img2ImgDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) class 
KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyInpaintCombinedPipeline params = ["prompt", "image", "mask_image"] batch_params = ["prompt", "negative_prompt", "image", "mask_image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = InpaintDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = InpaintDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3)
0
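The combined pipeline collapses the two-stage Kandinsky workflow (prior, then decoder) into a single call. A minimal sketch, assuming `AutoPipelineForText2Image` resolves the `kandinsky-community/kandinsky-2-1` checkpoint to `KandinskyCombinedPipeline` and that `prior_guidance_scale` is forwarded to the prior stage:

import torch

from diffusers import AutoPipelineForText2Image

# Expected to resolve to KandinskyCombinedPipeline, which runs the prior and the
# text-to-image decoder back to back inside a single __call__.
pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

image = pipe(
    "red cat, 4k photo",
    num_inference_steps=50,      # decoder steps
    prior_guidance_scale=4.0,    # assumed to be forwarded to the prior stage
).images[0]
image.save("cat.png")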
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_tokenizer(self): tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = MCLIPConfig( numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, ) text_encoder = MultilingualCLIP(config) text_encoder = text_encoder.eval() return text_encoder @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( 
num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create mask mask = np.zeros((64, 64), dtype=np.float32) mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyInpaintPipeline params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] batch_params = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_inpaint(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() 
pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @nightly @require_torch_gpu class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_inpaint(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) mask[:250, 250:-250] = 1 prompt = "a hat" pipe_prior = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
0
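For reference, a condensed standalone version of the inpainting integration test above; the checkpoints, image URL, and mask convention are all taken from that test:

import numpy as np
import torch

from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# In this version of the pipeline, 1 marks the pixels to repaint (see the mask in the test).
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1

generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior("a hat", negative_prompt="", generator=generator).to_tuple()

image = pipe(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]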
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import ( DDIMScheduler, DDPMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_tokenizer(self): tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = MCLIPConfig( numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, ) text_encoder = MultilingualCLIP(config) text_encoder = text_encoder.eval() return text_encoder @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer unet = self.dummy_unet movq = self.dummy_movq ddim_config = { 
"num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyImg2ImgPipeline params = ["prompt", "image_embeds", "negative_image_embeds", "image"] batch_params = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, 
-1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) @slow @require_torch_gpu class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_frog.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) prompt = "A red cartoon frog, 4k" pipe_prior = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) @nightly @require_torch_gpu class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_img2img_ddpm(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_img2img_ddpm_frog.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/frog.png" ) prompt = "A red cartoon frog, 4k" pipe_prior = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler") pipeline = KandinskyImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
0
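Standalone usage corresponding to the img2img integration test above; checkpoints and parameters mirror the test, and the low `strength` keeps the output close to the source image:

import torch

from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)

generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "A red cartoon frog, 4k"
image_emb, zero_image_emb = pipe_prior(prompt, negative_prompt="", generator=generator).to_tuple()

image = pipe(
    prompt,
    image=init_image,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    strength=0.2,  # low strength = stay close to init_image
    num_inference_steps=100,
).images[0]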
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } 
return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = Dummies() return dummy.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummy = Dummies() return dummy.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
0
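A minimal sketch of running the prior on its own, using the same checkpoint as the integration tests in the neighboring files; note that its output is a CLIP image embedding consumed by the decoder pipelines, not an image:

import torch

from diffusers import KandinskyPriorPipeline

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)
# The prior maps text to CLIP image embeddings; to_tuple() yields
# (image_embeds, negative_image_embeds) for the downstream decoder pipelines.
image_emb, zero_image_emb = pipe_prior(
    "red cat, 4k photo",
    negative_prompt="",
    generator=generator,
    num_inference_steps=25,
    guidance_scale=4.0,
).to_tuple()
print(image_emb.shape)  # a (batch, embedding_dim) tensor, not an image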
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_tokenizer(self): tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = MCLIPConfig( numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, ) text_encoder = MultilingualCLIP(config) text_encoder = text_encoder.eval() return text_encoder @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", 
beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyPipeline params = [ "prompt", "image_embeds", "negative_image_embeds", ] batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = Dummies() return dummy.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummy = Dummies() return dummy.get_dummy_inputs(device=device, seed=seed) def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 @slow @require_torch_gpu class KandinskyPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_text2img_cat_fp16.npy" ) pipe_prior = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( prompt, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
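The full two-stage text-to-image flow that the integration test above exercises, condensed into standalone usage with the same checkpoints and parameters:

import torch

from diffusers import KandinskyPipeline, KandinskyPriorPipeline

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "red cat, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(prompt, negative_prompt="", generator=generator).to_tuple()

image = pipe(
    prompt,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    generator=generator,
    num_inference_steps=100,
).images[0]
image.save("cat.png")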
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenPriorPipeline from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import ( LoRAAttnProcessor, LoRAAttnProcessor2_0, ) from diffusers.pipelines.wuerstchen import WuerstchenPrior from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import enable_full_determinism, require_peft_backend, skip_mps, torch_device if is_peft_available(): from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def create_prior_lora_layers(unet: nn.Module): lora_attn_procs = {} for name in unet.attn_processors.keys(): lora_attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor ) lora_attn_procs[name] = lora_attn_processor_class( hidden_size=unet.config.c, ) unet_lora_layers = AttnProcsLayers(lora_attn_procs) return lora_attn_procs, unet_lora_layers class WuerstchenPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2, } model = WuerstchenPrior(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer scheduler = DDPMWuerstchenScheduler() components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: 
generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeddings image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, 0, 0, -10:] image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:] assert image.shape == (1, 2, 24, 24) expected_slice = np.array( [ -7172.837, -3438.855, -1093.312, 388.8835, -7471.467, -7998.1206, -5328.259, 218.00089, -2731.5745, -8056.734, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=2e-1, ) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="flaky for now") def test_float16_inference(self): super().test_float16_inference() # override because we need to make sure latent_mean and latent_std to be 0 def test_callback_inputs(self): components = self.get_dummy_components() components["latent_mean"] = 0 components["latent_std"] = 0 pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def check_if_lora_correctly_set(self, model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def get_lora_components(self): prior = self.dummy_prior prior_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False ) prior_lora_attn_procs, prior_lora_layers = create_prior_lora_layers(prior) lora_components = { "prior_lora_layers": prior_lora_layers, "prior_lora_attn_procs": prior_lora_attn_procs, } return prior, prior_lora_config, lora_components @require_peft_backend def test_inference_with_prior_lora(self): _, prior_lora_config, _ = self.get_lora_components() device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output_no_lora = pipe(**self.get_dummy_inputs(device)) image_embed = output_no_lora.image_embeddings self.assertTrue(image_embed.shape == (1, 2, 24, 24)) pipe.prior.add_adapter(prior_lora_config) self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior") output_lora = pipe(**self.get_dummy_inputs(device)) lora_image_embed = output_lora.image_embeddings self.assertTrue(image_embed.shape == lora_image_embed.shape)
0
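A hedged sketch of running the Wuerstchen prior outside the tests. The call arguments mirror the fast test above; the `warp-ai/wuerstchen-prior` hub id is an assumption, since the tests construct dummy components rather than loading a checkpoint:

import torch

from diffusers import WuerstchenPriorPipeline

# Hub id is an assumption; the fast tests above build dummy components instead.
prior_pipe = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
).to("cuda")

prior_output = prior_pipe(
    "an astronaut riding a horse",  # placeholder prompt
    guidance_scale=4.0,
    num_inference_steps=30,
)
# As asserted in test_wuerstchen_prior, the result is a tensor of compressed
# image embeddings (stage C latents), not decoded pixels.
print(prior_output.image_embeddings.shape)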
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import DDPMWuerstchenScheduler, WuerstchenCombinedPipeline
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class WuerstchenCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = WuerstchenCombinedPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "prior_guidance_scale",
        "decoder_guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "prior_num_inference_steps",
        "output_type",
    ]
    test_xformers_attention = True

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {"c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2}
        model = WuerstchenPrior(**model_kwargs)
        return model.eval()

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_prior_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config).eval()

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            projection_dim=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config).eval()

    @property
    def dummy_vqgan(self):
        torch.manual_seed(0)

        model_kwargs = {
            "bottleneck_blocks": 1,
            "num_vq_embeddings": 2,
        }
        model = PaellaVQModel(**model_kwargs)
        return model.eval()

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "c_cond": self.text_embedder_hidden_size,
            "c_hidden": [320],
            "nhead": [-1],
            "blocks": [4],
            "level_config": ["CT"],
            "clip_embd": self.text_embedder_hidden_size,
            "inject_effnet": [False],
        }
        model = WuerstchenDiffNeXt(**model_kwargs)
        return model.eval()

    def get_dummy_components(self):
        prior = self.dummy_prior
        prior_text_encoder = self.dummy_prior_text_encoder

        scheduler = DDPMWuerstchenScheduler()
        tokenizer = self.dummy_tokenizer

        text_encoder = self.dummy_text_encoder
        decoder = self.dummy_decoder
        vqgan = self.dummy_vqgan

        components = {
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "decoder": decoder,
            "vqgan": vqgan,
            "scheduler": scheduler,
            "prior_prior": prior,
            "prior_text_encoder": prior_text_encoder,
            "prior_tokenizer": tokenizer,
            "prior_scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "prior_guidance_scale": 4.0,
            "decoder_guidance_scale": 4.0,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "np",
            "height": 128,
            "width": 128,
        }
        return inputs

    def test_wuerstchen(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[-3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)

        expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    @require_torch_gpu
    def test_offloads(self):
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload()
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload()
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skip(reason="flaky and float16 requires CUDA")
    def test_float16_inference(self):
        super().test_float16_inference()

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass
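# Hedged usage sketch (illustrative only, not part of the original test file):
# the fast tests above drive WuerstchenCombinedPipeline with tiny dummy
# components; a minimal real-world invocation might look like the sketch below.
# It assumes the public Hub checkpoint "warp-ai/wuerstchen" and a CUDA device,
# and only uses call arguments that also appear in get_dummy_inputs above.
if __name__ == "__main__":
    pipe = WuerstchenCombinedPipeline.from_pretrained(
        "warp-ai/wuerstchen", torch_dtype=torch.float16  # checkpoint name is an assumption
    ).to("cuda")
    image = pipe(
        "an astronaut riding a horse",
        height=1024,
        width=1024,
        prior_guidance_scale=4.0,
        decoder_guidance_scale=4.0,
        output_type="pil",
    ).images[0]
    image.save("wuerstchen.png")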
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import DDPMWuerstchenScheduler, WuerstchenDecoderPipeline
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = WuerstchenDecoderPipeline
    params = ["prompt"]
    batch_params = ["image_embeddings", "prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"]

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            projection_dim=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config).eval()

    @property
    def dummy_vqgan(self):
        torch.manual_seed(0)

        model_kwargs = {
            "bottleneck_blocks": 1,
            "num_vq_embeddings": 2,
        }
        model = PaellaVQModel(**model_kwargs)
        return model.eval()

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "c_cond": self.text_embedder_hidden_size,
            "c_hidden": [320],
            "nhead": [-1],
            "blocks": [4],
            "level_config": ["CT"],
            "clip_embd": self.text_embedder_hidden_size,
            "inject_effnet": [False],
        }
        model = WuerstchenDiffNeXt(**model_kwargs)
        return model.eval()

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        vqgan = self.dummy_vqgan

        scheduler = DDPMWuerstchenScheduler()

        components = {
            "decoder": decoder,
            "vqgan": vqgan,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "latent_dim_scale": 4.0,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeddings": torch.ones((1, 4, 4, 4), device=device),
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 1.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_wuerstchen_decoder(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        # `return_dict=False` returns a tuple, so take its first element (the images array).
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.0000, 0.0000, 0.0089, 1.0000, 1.0000, 0.3927, 1.0000, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-5)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @unittest.skip(reason="float16 requires CUDA")
    def test_float16_inference(self):
        super().test_float16_inference()
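# Hedged usage sketch (illustrative only, not part of the original test file):
# the decoder is normally fed image embeddings produced by the Wuerstchen prior.
# A minimal two-stage run, assuming the public checkpoints
# "warp-ai/wuerstchen-prior" and "warp-ai/wuerstchen", and that the prior
# pipeline output exposes an `.image_embeddings` attribute.
if __name__ == "__main__":
    from diffusers import WuerstchenPriorPipeline

    device, dtype = "cuda", torch.float16
    prior = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=dtype).to(device)
    decoder = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=dtype).to(device)

    prompt = "a red apple on a wooden table"
    prior_output = prior(prompt, guidance_scale=4.0)
    image = decoder(
        image_embeddings=prior_output.image_embeddings,  # attribute name is an assumption
        prompt=prompt,
        guidance_scale=0.0,
        output_type="pil",
    ).images[0]
    image.save("apple.png")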
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(4, 8),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=4,
            attention_head_dim=4,
            norm_num_groups=2,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(8,),
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=32,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=4,
            intermediate_size=16,
            layer_norm_eps=1e-05,
            num_attention_heads=2,
            num_hidden_layers=2,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([192.0, 44.0, 157.0, 140.0, 108.0, 104.0, 123.0, 144.0, 129.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.")
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
@require_torch_gpu
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model_with_freeu(self):
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to(torch_device)

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        video = video[0, 0, -3:, -3:, -1].flatten()

        expected_video = [-0.3102, -0.2477, -0.1772, -0.648, -0.6176, -0.5484, -0.0217, -0.056, -0.0177]

        assert np.abs(expected_video - video).mean() < 5e-2
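# Hedged usage sketch (illustrative only, not part of the original test file):
# generating a short clip with the same checkpoint the slow tests use, then
# writing it to disk with diffusers' `export_to_video` helper. Assumes a CUDA
# device with enough memory for the fp16 model.
if __name__ == "__main__":
    from diffusers.utils import export_to_video

    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
    pipe.enable_model_cpu_offload()

    video_frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
    print(export_to_video(video_frames))  # prints the path of the written video file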
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import DDIMScheduler, TextToVideoZeroPipeline
from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu

from ..test_pipelines_common import assert_mean_pixel_difference


@nightly
@require_torch_gpu
class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        generator = torch.Generator(device="cuda").manual_seed(0)

        prompt = "A bear is playing a guitar on Times Square"
        result = pipe(prompt=prompt, generator=generator).images

        expected_result = load_pt(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/A bear is playing a guitar on Times Square.pt"
        )

        assert_mean_pixel_difference(result, expected_result)
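# Hedged usage sketch (illustrative only, not part of the original test file):
# the zero-shot pipeline returns a batch of frames as numpy arrays in [0, 1].
# The export step below assumes the third-party `imageio` package is installed.
if __name__ == "__main__":
    import imageio

    model_id = "runwayml/stable-diffusion-v1-5"
    pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

    result = pipe(prompt="A panda is playing guitar on times square").images
    result = [(r * 255).astype("uint8") for r in result]  # rescale frames to uint8
    imageio.mimsave("video.mp4", result, fps=4)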
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    is_flaky,
    nightly,
    numpy_cosine_similarity_distance,
    skip_mps,
    torch_device,
)

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(4, 8),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
            norm_num_groups=2,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=True,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[8],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=32,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([162.0, 136.0, 132.0, 140.0, 139.0, 137.0, 169.0, 134.0, 132.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @is_flaky()
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=0.001)

    @is_flaky()
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent()

    @is_flaky()
    def test_save_load_local(self):
        super().test_save_load_local()

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@nightly
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 320, 576), generator=generator)

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-0.9770508, -0.8027344, -0.62646484, -0.8334961, -0.7573242])
        output_array = video_frames.cpu().numpy()[0, 0, 0, 0, -5:]

        assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-2
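# Hedged usage sketch (illustrative only, not part of the original test file):
# refining an input clip, mirroring the slow test above. A random tensor stands
# in for real video frames; with real inputs one would pass actual frames
# instead. Assumes a CUDA device with enough memory for the fp16 model.
if __name__ == "__main__":
    pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
    pipe.enable_model_cpu_offload()

    generator = torch.Generator(device="cpu").manual_seed(0)
    video = torch.randn((1, 10, 3, 320, 576), generator=generator)  # stand-in clip
    video_frames = pipe(
        "Spiderman is surfing", video=video, generator=generator, num_inference_steps=3, output_type="pt"
    ).frames
    print(video_frames.shape)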
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/consistency_models/test_consistency_models.py
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    nightly,
    require_torch_2,
    require_torch_gpu,
    torch_device,
)
from diffusers.utils.torch_utils import randn_tensor

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@nightly
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0146, 0.0158, 0.0092, 0.0086, 0.0000, 0.0000, 0.0000, 0.0000, 0.0058])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0059, 0.0003, 0.0000, 0.0023, 0.0052, 0.0007, 0.0165, 0.0081, 0.0095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1623, 0.2009, 0.2387, 0.1731, 0.1168, 0.1202, 0.2031, 0.1327, 0.2447])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, torch_device


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16"""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
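# Hedged usage sketch (illustrative only, not part of the original test file):
# running Safe Latent Diffusion with the strong safety configuration exercised
# by the integration tests above. `StableDiffusionPipeline` here is the safe
# pipeline alias imported at the top of this file; assumes a CUDA device.
if __name__ == "__main__":
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    image = pipe(
        "portrait photo of a person in an abandoned hotel",
        sld_guidance_scale=2000,
        sld_warmup_steps=7,
        sld_threshold=0.025,
        sld_momentum_scale=0.5,
        sld_mom_beta=0.7,
    ).images[0]
    image.save("safe_sd.png")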
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_numpy,
    nightly,
    require_torch_gpu,
    torch_device,
)


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
            image_encoder=None,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=True)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16"""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
            image_encoder=None,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so a loose max-difference tolerance is used here
        assert np.abs(expected_image - image).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device

from ..pipeline_params import (
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@nightly
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/blipdiffusion/test_blipdiffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTokenizer
from transformers.models.blip_2.configuration_blip_2 import Blip2Config
from transformers.models.clip.configuration_clip import CLIPTextConfig

from diffusers import AutoencoderKL, BlipDiffusionPipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.utils.testing_utils import enable_full_determinism
from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor
from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel
from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class BlipDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = BlipDiffusionPipeline
    params = [
        "prompt",
        "reference_image",
        "source_subject_category",
        "target_subject_category",
    ]
    batch_params = [
        "prompt",
        "reference_image",
        "source_subject_category",
        "target_subject_category",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "neg_prompt",
        "prompt_strength",
        "prompt_reps",
    ]

    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            vocab_size=1000,
            hidden_size=16,
            intermediate_size=16,
            projection_dim=16,
            num_hidden_layers=1,
            num_attention_heads=1,
            max_position_embeddings=77,
        )
        text_encoder = ContextCLIPTextModel(text_encoder_config)

        vae = AutoencoderKL(
            in_channels=4,
            out_channels=4,
            down_block_types=("DownEncoderBlock2D",),
            up_block_types=("UpDecoderBlock2D",),
            block_out_channels=(32,),
            layers_per_block=1,
            act_fn="silu",
            latent_channels=4,
            norm_num_groups=16,
            sample_size=16,
        )

        blip_vision_config = {
            "hidden_size": 16,
            "intermediate_size": 16,
            "num_hidden_layers": 1,
            "num_attention_heads": 1,
            "image_size": 224,
            "patch_size": 14,
            "hidden_act": "quick_gelu",
        }

        blip_qformer_config = {
            "vocab_size": 1000,
            "hidden_size": 16,
            "num_hidden_layers": 1,
            "num_attention_heads": 1,
            "intermediate_size": 16,
            "max_position_embeddings": 512,
            "cross_attention_frequency": 1,
            "encoder_hidden_size": 16,
        }
        qformer_config = Blip2Config(
            vision_config=blip_vision_config,
            qformer_config=blip_qformer_config,
            num_query_tokens=16,
            tokenizer="hf-internal-testing/tiny-random-bert",
        )
        qformer = Blip2QFormerModel(qformer_config)

        unet = UNet2DConditionModel(
            block_out_channels=(16, 32),
            norm_num_groups=16,
            layers_per_block=1,
            sample_size=16,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=16,
        )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        scheduler = PNDMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            skip_prk_steps=True,
        )

        vae.eval()
        qformer.eval()
        text_encoder.eval()

        image_processor = BlipImageProcessor()

        components = {
            "text_encoder": text_encoder,
            "vae": vae,
            "qformer": qformer,
            "unet": unet,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        np.random.seed(seed)
        reference_image = np.random.rand(32, 32, 3) * 255
        reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA")

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "swimming underwater",
            "generator": generator,
            "reference_image": reference_image,
            "source_subject_category": "dog",
            "target_subject_category": "dog",
            "height": 32,
            "width": 32,
            "guidance_scale": 7.5,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_blipdiffusion(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        image = pipe(**self.get_dummy_inputs(device))[0]
        image_slice = image[0, -3:, -3:, 0]

        assert image.shape == (1, 16, 16, 4)

        expected_slice = np.array([0.7096, 0.5900, 0.6703, 0.4032, 0.7766, 0.3629, 0.5447, 0.4149, 0.8172])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_unclip.py
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


# UnCLIPScheduler is a modified DDPMScheduler with a subset of the configuration.
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_kdpm2_discrete.py
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils.testing_utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_with_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        # add noise
        t_start = self.num_inference_steps - 2
        noise = self.dummy_noise_deter
        noise = noise.to(sample.device)
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for i, t in enumerate(timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 70408.4062) < 1e-2, f" expected result sum 70408.4062, but got {result_sum}"
        assert abs(result_mean.item() - 91.6776) < 1e-3, f" expected result mean 91.6776, but got {result_mean}"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_deis.py
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                t = scheduler.timesteps[t]
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_full_loop_with_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        t_start = 8

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # add noise
        noise = self.dummy_noise_deter
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for i, t in enumerate(timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 315.3016) < 1e-2, f" expected result sum 315.3016, but got {result_sum}"
        assert abs(result_mean.item() - 0.41054) < 1e-3, f" expected result mean 0.41054, but got {result_mean}"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_lms.py
import torch

from diffusers import LMSDiscreteScheduler
from diffusers.utils.testing_utils import torch_device

from .test_schedulers import SchedulerCommonTest


class LMSDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (LMSDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 800]:
            self.check_over_forward(time_step=t)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0017) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1006.388) < 1e-2
        assert abs(result_mean.item() - 1.31) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 3812.9927) < 2e-2
        assert abs(result_mean.item() - 4.9648) < 1e-3

    def test_full_loop_with_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        # add noise
        t_start = self.num_inference_steps - 2
        noise = self.dummy_noise_deter
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for i, t in enumerate(timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 27663.6895) < 1e-2
        assert abs(result_mean.item() - 36.0204) < 1e-3
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim_parallel.py
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3

    def test_full_loop_with_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        t_start = 8

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        # add noise
        noise = self.dummy_noise_deter
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for t in timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 354.5418) < 1e-2, f" expected result sum 354.5418, but got {result_sum}"
        assert abs(result_mean.item() - 0.4616) < 1e-3, f" expected result mean 0.4616, but got {result_mean}"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_sde.py
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils.testing_utils import require_torchsde, torch_device

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py
import torch

from diffusers import KDPM2AncestralDiscreteScheduler
from diffusers.utils.testing_utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2AncestralDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 13849.3877) < 1e-2
        assert abs(result_mean.item() - 18.0331) < 5e-3

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        generator = torch.manual_seed(0)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 328.9970) < 1e-2
        assert abs(result_mean.item() - 0.4284) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 13849.3818) < 1e-1
        assert abs(result_mean.item() - 18.0331) < 1e-3

    def test_full_loop_with_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        # add noise
        t_start = self.num_inference_steps - 2
        noise = self.dummy_noise_deter
        noise = noise.to(sample.device)
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for i, t in enumerate(timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 93087.0312) < 1e-2, f" expected result sum 93087.0312, but got {result_sum}"
        assert abs(result_mean.item() - 121.2071) < 5e-3, f" expected result mean 121.2071, but got {result_mean}"
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_lcm.py
import tempfile
from typing import Dict, List, Tuple

import torch

from diffusers import LCMScheduler
from diffusers.utils.testing_utils import torch_device

from .test_schedulers import SchedulerCommonTest


class LCMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (LCMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 10),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00085,
            "beta_end": 0.0120,
            "beta_schedule": "scaled_linear",
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    @property
    def default_valid_timestep(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)
        timestep = scheduler.timesteps[-1]
        return timestep

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is
            self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]:
            self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    time_step=self.default_valid_timestep,
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        # Get default timestep schedule.
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        for t in timesteps:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        # Hardcoded for now
        for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    # Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work
    # for LCMScheduler under default settings
    def test_add_noise_device(self, num_inference_steps=10):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            sample = self.dummy_sample.to(torch_device)
            scaled_sample = scheduler.scale_model_input(sample, 0.0)
            self.assertEqual(sample.shape, scaled_sample.shape)

            noise = torch.randn_like(scaled_sample).to(torch_device)
            t = scheduler.timesteps[5][None]
            noised = scheduler.add_noise(scaled_sample, noise, t)
            self.assertEqual(noised.shape, scaled_sample.shape)

    # Override test_from_save_pretrained because it hardcodes a timestep of 1
    def test_from_save_pretrained(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            timestep = self.default_valid_timestep

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)

            scheduler.set_timesteps(num_inference_steps)
            new_scheduler.set_timesteps(num_inference_steps)

            kwargs["generator"] = torch.manual_seed(0)
            output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample

            kwargs["generator"] = torch.manual_seed(0)
            new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    # Override test_step_shape because it uses 0 and 1 as hardcoded timesteps
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler.set_timesteps(num_inference_steps)

            timestep_0 = scheduler.timesteps[-2]
            timestep_1 = scheduler.timesteps[-1]

            output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    # Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep
    def test_scheduler_outputs_equivalence(self):
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                    ),
                )

        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", 50)

        timestep = self.default_valid_timestep

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler.set_timesteps(num_inference_steps)
            kwargs["generator"] = torch.manual_seed(0)
            outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)

            scheduler.set_timesteps(num_inference_steps)
            kwargs["generator"] = torch.manual_seed(0)
            outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)

            recursive_check(outputs_tuple, outputs_dict)

    def full_loop(self, num_inference_steps=10, seed=0, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(seed)

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, generator).prev_sample

        return sample

    def test_full_loop_onestep(self):
        sample = self.full_loop(num_inference_steps=1)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # TODO: get expected sum and mean
        assert abs(result_sum.item() - 18.7097) < 1e-3
        assert abs(result_mean.item() - 0.0244) < 1e-3

    def test_full_loop_multistep(self):
        sample = self.full_loop(num_inference_steps=10)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # TODO: get expected sum and mean
        assert abs(result_sum.item() - 197.7616) < 1e-3
        assert abs(result_mean.item() - 0.2575) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_single.py
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest): scheduler_classes = (DPMSolverSinglestepScheduler,) forward_default_kwargs = (("num_inference_steps", 25),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf"), "variance_type": None, } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output, new_output = sample, sample for t in range(time_step, time_step + scheduler.config.solver_order + 1): t = scheduler.timesteps[t] output = scheduler.step(residual, t, output, **kwargs).prev_sample new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, scheduler=None, **config): if scheduler is None: scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = 
scheduler_class(**scheduler_config) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_full_uneven_loop(self): scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) num_inference_steps = 50 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:]): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2574) < 1e-3 def test_timesteps(self): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2791) < 1e-3 scheduler = DEISMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) scheduler = UniPCMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2791) < 1e-3 def test_thresholding(self): self.check_over_configs(thresholding=False) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, ) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_solver_order_and_type(self): for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) sample = self.full_loop( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) assert not torch.isnan(sample).any(), "Samples have nan numbers" def test_lower_order_final(self): self.check_over_configs(lower_order_final=True) self.check_over_configs(lower_order_final=False) def test_lambda_min_clipped(self): self.check_over_configs(lambda_min_clipped=-float("inf")) self.check_over_configs(lambda_min_clipped=-5.1) def test_variance_type(self): self.check_over_configs(variance_type=None) self.check_over_configs(variance_type="learned_range") def test_inference_steps(self): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = 
torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2791) < 1e-3 def test_full_loop_with_karras(self): sample = self.full_loop(use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2248) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1453) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.0649) < 1e-3 def test_fp16_support(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter.half() scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample assert sample.dtype == torch.float16 def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] time_step_0 = scheduler.timesteps[0] time_step_1 = scheduler.timesteps[1] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 5 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 269.2187) < 1e-2, f" expected result sum 269.2187, but get {result_sum}" assert abs(result_mean.item() - 0.3505) < 1e-3, f" expected result mean 0.3505, but get {result_mean}"
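# Illustrative sketch (hypothetical): the `full_loop` helper above boils down to this
# standalone singlestep DPM-Solver sampling loop; the residual is again a stand-in for
# a real model output.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(algorithm_type="dpmsolver++", solver_order=2)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample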
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_multi_inverse.py
import tempfile import torch from diffusers import DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler from .test_schedulers import SchedulerCommonTest class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): scheduler_classes = (DPMSolverMultistepInverseScheduler,) forward_default_kwargs = (("num_inference_steps", 25),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lower_order_final": False, "lambda_min_clipped": -float("inf"), "variance_type": None, } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output, new_output = sample, sample for t in range(time_step, time_step + scheduler.config.solver_order + 1): t = scheduler.timesteps[t] output = scheduler.step(residual, t, output, **kwargs).prev_sample new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, scheduler=None, **config): if scheduler is None: scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = 
scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_thresholding(self): self.check_over_configs(thresholding=False) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, ) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_solver_order_and_type(self): for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) sample = self.full_loop( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) assert not torch.isnan(sample).any(), "Samples have nan numbers" def test_lower_order_final(self): self.check_over_configs(lower_order_final=True) self.check_over_configs(lower_order_final=False) def test_lambda_min_clipped(self): self.check_over_configs(lambda_min_clipped=-float("inf")) self.check_over_configs(lambda_min_clipped=-5.1) def test_variance_type(self): self.check_over_configs(variance_type=None) self.check_over_configs(variance_type="learned_range") def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_inference_steps(self): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.7047) < 1e-3 def test_full_loop_no_noise_thres(self): 
sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 19.8933) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 1.5194) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 1.7833) < 2e-3 def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = DPMSolverMultistepInverseScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.7047) < 1e-3 scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepInverseScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) new_result_mean = torch.mean(torch.abs(sample)) assert abs(new_result_mean.item() - result_mean.item()) < 1e-3 def test_fp16_support(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter.half() scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample assert sample.dtype == torch.float16 def test_unique_timesteps(self, **config): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(scheduler.config.num_train_timesteps) assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
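# Illustrative sketch (hypothetical): `test_switch` above depends on the forward and
# inverse multistep schedulers sharing a compatible config, so each can be rebuilt from
# the other via `from_config` without losing settings.
from diffusers import DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler

inverse = DPMSolverMultistepInverseScheduler(num_train_timesteps=1000)
forward = DPMSolverMultistepScheduler.from_config(inverse.config)
inverse_again = DPMSolverMultistepInverseScheduler.from_config(forward.config)
assert inverse_again.config.num_train_timesteps == 1000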
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_euler_ancestral.py
import torch from diffusers import EulerAncestralDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (EulerAncestralDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 108.4439) < 1e-2 assert abs(result_mean.item() - 0.1412) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = 
self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) t_start = self.num_inference_steps - 2 scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma # add noise noise = self.dummy_noise_deter noise = noise.to(sample.device) timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 56163.0508) < 1e-2, f"expected result sum 56163.0508, but got {result_sum}" assert abs(result_mean.item() - 73.1290) < 1e-3, f"expected result mean 73.1290, but got {result_mean}"
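# Illustrative sketch (hypothetical): the Euler-ancestral loops above follow the same
# pattern -- scale the model input per timestep and pass a generator to step(), since the
# ancestral sampler draws fresh noise at every step.
import torch
from diffusers import EulerAncestralDiscreteScheduler

scheduler = EulerAncestralDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    residual = 0.1 * model_input  # stand-in for model(model_input, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample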
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_vq_diffusion.py
import torch
import torch.nn.functional as F

from diffusers import VQDiffusionScheduler

from .test_schedulers import SchedulerCommonTest


class VQDiffusionSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (VQDiffusionScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_vec_classes": 4097,
            "num_train_timesteps": 100,
        }

        config.update(**kwargs)
        return config

    def dummy_sample(self, num_vec_classes):
        batch_size = 4
        height = 8
        width = 8

        sample = torch.randint(0, num_vec_classes, (batch_size, height * width))

        return sample

    @property
    def dummy_sample_deter(self):
        assert False

    def dummy_model(self, num_vec_classes):
        def model(sample, t, *args):
            batch_size, num_latent_pixels = sample.shape
            logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels))
            return_value = F.log_softmax(logits.double(), dim=1).float()
            return return_value

        return model

    def test_timesteps(self):
        for timesteps in [2, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_num_vec_classes(self):
        for num_vec_classes in [5, 100, 1000, 4000]:
            self.check_over_configs(num_vec_classes=num_vec_classes)

    def test_time_indices(self):
        for t in [0, 50, 99]:
            self.check_over_forward(time_step=t)

    def test_add_noise_device(self):
        pass
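# Illustrative sketch (hypothetical): unlike the continuous schedulers, VQDiffusionScheduler
# steps over integer class indices, and the model output is a log-probability over the
# non-masked classes (hence num_vec_classes - 1), mirroring dummy_model above.
import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler

num_vec_classes = 4097
scheduler = VQDiffusionScheduler(num_vec_classes=num_vec_classes, num_train_timesteps=100)

sample = torch.randint(0, num_vec_classes, (1, 64))  # (batch, latent pixels) class indices
logits = torch.rand(1, num_vec_classes - 1, 64)
model_output = F.log_softmax(logits.double(), dim=1).float()
prev = scheduler.step(model_output, 50, sample, generator=torch.manual_seed(0)).prev_sample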
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class DDPMParallelSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDPMParallelScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def test_batch_step_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample1 = self.dummy_sample_deter sample2 = self.dummy_sample_deter + 0.1 sample3 = self.dummy_sample_deter - 0.1 per_sample_batch = sample1.shape[0] samples = torch.stack([sample1, sample2, sample3], dim=0) timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch) residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1)) pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1)) result_sum = torch.sum(torch.abs(pred_prev_sample)) result_mean = torch.mean(torch.abs(pred_prev_sample)) assert abs(result_sum.item() - 1153.1833) < 1e-2 assert abs(result_mean.item() - 0.5005) < 1e-3 def 
test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps) def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = 
len(scheduler) t_start = num_trained_timesteps - 2 model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 387.9466) < 1e-2, f"expected result sum 387.9466, but got {result_sum}" assert abs(result_mean.item() - 0.5051) < 1e-3, f"expected result mean 0.5051, but got {result_mean}"
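# Illustrative sketch (hypothetical): batch_step_no_noise is the parallel-sampling entry
# point exercised above -- one deterministic step for several samples at (possibly)
# different timesteps, returning the predicted previous samples directly as a tensor.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
samples = torch.randn(3, 3, 8, 8)
residuals = 0.1 * samples            # stand-in for model(samples, timesteps)
timesteps = torch.tensor([0, 1, 2])  # one timestep per sample
prev_samples = scheduler.batch_step_no_noise(residuals, timesteps, samples)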
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_pndm.py
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class PNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (PNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = 
self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.prk_timesteps): residual = model(sample, t) sample = scheduler.step_prk(residual, t, sample).prev_sample for i, t in enumerate(scheduler.plms_timesteps): residual = model(sample, t) sample = scheduler.step_plms(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(10) assert torch.equal( scheduler.timesteps, torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), ) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [1, 5, 10]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps) def test_pow_of_3_inference_steps(self): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 num_inference_steps = 27 for scheduler_class in self.scheduler_classes: sample = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2]): sample = scheduler.step_prk(residual, t, sample).prev_sample def test_inference_plms_no_past_residuals(self): with self.assertRaises(ValueError): scheduler_class = 
self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 198.1318) < 1e-2 assert abs(result_mean.item() - 0.2580) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 67.3986) < 1e-2 assert abs(result_mean.item() - 0.0878) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 230.0399) < 1e-2 assert abs(result_mean.item() - 0.2995) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 186.9482) < 1e-2 assert abs(result_mean.item() - 0.2434) < 1e-3
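# Illustrative sketch (hypothetical): PNDM sampling is two-phase, as full_loop above
# shows -- a few Runge-Kutta (PRK) warm-up steps to build up the `ets` history, then
# linear multistep (PLMS) steps for the remainder.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:   # Runge-Kutta warm-up
    sample = scheduler.step_prk(0.1 * sample, t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep phase
    sample = scheduler.step_plms(0.1 * sample, t, sample).prev_sample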
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_score_sde_ve.py
import tempfile import unittest import numpy as np import torch from diffusers import ScoreSdeVeScheduler class ScoreSdeVeSchedulerTest(unittest.TestCase): # TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration) scheduler_classes = (ScoreSdeVeScheduler,) forward_default_kwargs = () @property def dummy_sample(self): batch_size = 4 num_channels = 3 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample @property def dummy_sample_deter(self): batch_size = 4 num_channels = 3 height = 8 width = 8 num_elems = batch_size * num_channels * height * width sample = torch.arange(num_elems) sample = sample.reshape(num_channels, height, width, batch_size) sample = sample / num_elems sample = sample.permute(3, 0, 1, 2) return sample def dummy_model(self): def model(sample, t, *args): return sample * t / (t + 1) return model def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 2000, "snr": 0.15, "sigma_min": 0.01, "sigma_max": 1348, "sampling_eps": 1e-5, } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) for scheduler_class in self.scheduler_classes: sample = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) output = scheduler.step_pred( residual, time_step, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample new_output = new_scheduler.step_pred( residual, time_step, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample new_output = new_scheduler.step_correct( residual, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) for scheduler_class in self.scheduler_classes: sample = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) output = scheduler.step_pred( residual, time_step, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample new_output = new_scheduler.step_pred( residual, time_step, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample new_output = new_scheduler.step_correct( residual, sample, generator=torch.manual_seed(0), **kwargs ).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical" def test_timesteps(self): for timesteps in [10, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_sigmas(self): for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]): self.check_over_configs(sigma_min=sigma_min, 
sigma_max=sigma_max) def test_time_indices(self): for t in [0.1, 0.5, 0.75]: self.check_over_forward(time_step=t) def test_full_loop_no_noise(self): kwargs = dict(self.forward_default_kwargs) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 3 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_sigmas(num_inference_steps) scheduler.set_timesteps(num_inference_steps) generator = torch.manual_seed(0) for i, t in enumerate(scheduler.timesteps): sigma_t = scheduler.sigmas[i] for _ in range(scheduler.config.correct_steps): with torch.no_grad(): model_output = model(sample, sigma_t) sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample with torch.no_grad(): model_output = model(sample, sigma_t) output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs) sample, _ = output.prev_sample, output.prev_sample_mean result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert np.isclose(result_sum.item(), 14372758528.0) assert np.isclose(result_mean.item(), 18714530.0) def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape)
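# Illustrative sketch (hypothetical): the SDE-VE sampler is predictor-corrector, exactly
# as test_full_loop_no_noise above iterates -- `correct_steps` Langevin corrector steps
# per sigma, then one predictor step; both consume a generator.
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler(num_train_timesteps=2000)
scheduler.set_sigmas(3)
scheduler.set_timesteps(3)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8)
for i, t in enumerate(scheduler.timesteps):
    model_output = 0.1 * sample  # stand-in for a score model evaluated at sigmas[i]
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(model_output, sample, generator=generator).prev_sample
    sample = scheduler.step_pred(model_output, t, sample, generator=generator).prev_sample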
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_schedulers.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os import tempfile import unittest import uuid from typing import Dict, List, Tuple import numpy as np import torch from huggingface_hub import delete_repo import diffusers from diffusers import ( CMStochasticIterativeScheduler, DDIMScheduler, DEISMultistepScheduler, DiffusionPipeline, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, IPNDMScheduler, LMSDiscreteScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, logging, ) from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils.testing_utils import CaptureLogger, torch_device from ..others.test_utils import TOKEN, USER, is_staging_test torch.backends.cuda.matmul.allow_tf32 = False class SchedulerObject(SchedulerMixin, ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], ): pass class SchedulerObject2(SchedulerMixin, ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", f=[1, 3], ): pass class SchedulerObject3(SchedulerMixin, ConfigMixin): config_name = "config.json" @register_to_config def __init__( self, a=2, b=5, c=(2, 5), d="for diffusion", e=[1, 3], f=[1, 3], ): pass class SchedulerBaseTests(unittest.TestCase): def test_save_load_from_different_config(self): obj = SchedulerObject() # mock add obj class to `diffusers` setattr(diffusers, "SchedulerObject", SchedulerObject) logger = logging.get_logger("diffusers.configuration_utils") with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) with CaptureLogger(logger) as cap_logger_1: config = SchedulerObject2.load_config(tmpdirname) new_obj_1 = SchedulerObject2.from_config(config) # now save a config parameter that is not expected with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f: data = json.load(f) data["unexpected"] = True with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f: json.dump(data, f) with CaptureLogger(logger) as cap_logger_2: config = SchedulerObject.load_config(tmpdirname) new_obj_2 = SchedulerObject.from_config(config) with CaptureLogger(logger) as cap_logger_3: config = SchedulerObject2.load_config(tmpdirname) new_obj_3 = SchedulerObject2.from_config(config) assert new_obj_1.__class__ == SchedulerObject2 assert new_obj_2.__class__ == SchedulerObject assert new_obj_3.__class__ == SchedulerObject2 assert cap_logger_1.out == "" assert ( cap_logger_2.out == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and" " will" " be ignored. 
Please verify your config.json configuration file.\n" ) assert cap_logger_2.out.replace("SchedulerObject", "SchedulerObject2") == cap_logger_3.out def test_save_load_compatible_schedulers(self): SchedulerObject2._compatibles = ["SchedulerObject"] SchedulerObject._compatibles = ["SchedulerObject2"] obj = SchedulerObject() # mock add obj class to `diffusers` setattr(diffusers, "SchedulerObject", SchedulerObject) setattr(diffusers, "SchedulerObject2", SchedulerObject2) logger = logging.get_logger("diffusers.configuration_utils") with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) # now save a config parameter that is expected by another class, but not origin class with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f: data = json.load(f) data["f"] = [0, 0] data["unexpected"] = True with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f: json.dump(data, f) with CaptureLogger(logger) as cap_logger: config = SchedulerObject.load_config(tmpdirname) new_obj = SchedulerObject.from_config(config) assert new_obj.__class__ == SchedulerObject assert ( cap_logger.out == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and" " will" " be ignored. Please verify your config.json configuration file.\n" ) def test_save_load_from_different_config_comp_schedulers(self): SchedulerObject3._compatibles = ["SchedulerObject", "SchedulerObject2"] SchedulerObject2._compatibles = ["SchedulerObject", "SchedulerObject3"] SchedulerObject._compatibles = ["SchedulerObject2", "SchedulerObject3"] obj = SchedulerObject() # mock add obj class to `diffusers` setattr(diffusers, "SchedulerObject", SchedulerObject) setattr(diffusers, "SchedulerObject2", SchedulerObject2) setattr(diffusers, "SchedulerObject3", SchedulerObject3) logger = logging.get_logger("diffusers.configuration_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdirname: obj.save_config(tmpdirname) with CaptureLogger(logger) as cap_logger_1: config = SchedulerObject.load_config(tmpdirname) new_obj_1 = SchedulerObject.from_config(config) with CaptureLogger(logger) as cap_logger_2: config = SchedulerObject2.load_config(tmpdirname) new_obj_2 = SchedulerObject2.from_config(config) with CaptureLogger(logger) as cap_logger_3: config = SchedulerObject3.load_config(tmpdirname) new_obj_3 = SchedulerObject3.from_config(config) assert new_obj_1.__class__ == SchedulerObject assert new_obj_2.__class__ == SchedulerObject2 assert new_obj_3.__class__ == SchedulerObject3 assert cap_logger_1.out == "" assert cap_logger_2.out == "{'f'} was not found in config. Values will be initialized to default values.\n" assert cap_logger_3.out == "{'f'} was not found in config. 
Values will be initialized to default values.\n" def test_default_arguments_not_in_config(self): pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16 ) assert pipe.scheduler.__class__ == DDIMScheduler # Default for DDIMScheduler assert pipe.scheduler.config.timestep_spacing == "leading" # Switch to a different one, verify we use the default for that class pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) assert pipe.scheduler.config.timestep_spacing == "linspace" # Override with kwargs pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") assert pipe.scheduler.config.timestep_spacing == "trailing" # Verify overridden kwargs stick pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) assert pipe.scheduler.config.timestep_spacing == "trailing" # And stick pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) assert pipe.scheduler.config.timestep_spacing == "trailing" def test_default_solver_type_after_switch(self): pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16 ) assert pipe.scheduler.__class__ == DDIMScheduler pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config) assert pipe.scheduler.config.solver_type == "logrho" # Switch to UniPC, verify the solver is the default pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) assert pipe.scheduler.config.solver_type == "bh2" class SchedulerCommonTest(unittest.TestCase): scheduler_classes = () forward_default_kwargs = () @property def dummy_sample(self): batch_size = 4 num_channels = 3 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample @property def dummy_noise_deter(self): batch_size = 4 num_channels = 3 height = 8 width = 8 num_elems = batch_size * num_channels * height * width sample = torch.arange(num_elems).flip(-1) sample = sample.reshape(num_channels, height, width, batch_size) sample = sample / num_elems sample = sample.permute(3, 0, 1, 2) return sample @property def dummy_sample_deter(self): batch_size = 4 num_channels = 3 height = 8 width = 8 num_elems = batch_size * num_channels * height * width sample = torch.arange(num_elems) sample = sample.reshape(num_channels, height, width, batch_size) sample = sample / num_elems sample = sample.permute(3, 0, 1, 2) return sample def get_scheduler_config(self): raise NotImplementedError def dummy_model(self): def model(sample, t, *args): # if t is a tensor, match the number of dimensions of sample if isinstance(t, torch.Tensor): num_dims = len(sample.shape) # pad t with 1s to match num_dims t = t.reshape(-1, *(1,) * (num_dims - 1)).to(sample.device).to(sample.dtype) return sample * t / (t + 1) return model def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: # TODO(Suraj) - delete the following two lines once DDPM, DDIM, and PNDM have timesteps casted to float by default if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): time_step = float(time_step) scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be 
in timestep schedule. scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) time_step = scaled_sigma_max if scheduler_class == VQDiffusionScheduler: num_vec_classes = scheduler_config["num_vec_classes"] sample = self.dummy_sample(num_vec_classes) model = self.dummy_model(num_vec_classes) residual = model(sample, time_step) else: sample = self.dummy_sample residual = 0.1 * sample with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) new_scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # Make sure `scale_model_input` is invoked to prevent a warning if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be in timestep schedule. _ = scheduler.scale_model_input(sample, scaled_sigma_max) _ = new_scheduler.scale_model_input(sample, scaled_sigma_max) elif scheduler_class != VQDiffusionScheduler: _ = scheduler.scale_model_input(sample, 0) _ = new_scheduler.scale_model_input(sample, 0) # Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): time_step = float(time_step) scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) if scheduler_class == VQDiffusionScheduler: num_vec_classes = scheduler_config["num_vec_classes"] sample = self.dummy_sample(num_vec_classes) model = self.dummy_model(num_vec_classes) residual = model(sample, time_step) else: sample = self.dummy_sample residual = 0.1 * sample with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) new_scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - 
new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: timestep = 1 if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): timestep = float(timestep) scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be in timestep schedule. timestep = scheduler.sigma_to_t(scheduler.config.sigma_max) if scheduler_class == VQDiffusionScheduler: num_vec_classes = scheduler_config["num_vec_classes"] sample = self.dummy_sample(num_vec_classes) model = self.dummy_model(num_vec_classes) residual = model(sample, timestep) else: sample = self.dummy_sample residual = 0.1 * sample with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) new_scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_compatibles(self): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert all(c is not None for c in scheduler.compatibles) for comp_scheduler_cls in scheduler.compatibles: comp_scheduler = comp_scheduler_cls.from_config(scheduler.config) assert comp_scheduler is not None new_scheduler = scheduler_class.from_config(comp_scheduler.config) new_scheduler_config = {k: v for k, v in new_scheduler.config.items() if k in scheduler.config} scheduler_diff = {k: v for k, v in new_scheduler.config.items() if k not in scheduler.config} # make sure that configs are essentially identical assert new_scheduler_config == dict(scheduler.config) # make sure that only differences are for configs that are not in init init_keys = inspect.signature(scheduler_class.__init__).parameters.keys() assert set(scheduler_diff.keys()).intersection(set(init_keys)) == set() def test_from_pretrained(self): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_pretrained(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # `_use_default_values` should not exist for just saved & loaded scheduler scheduler_config = dict(scheduler.config) del scheduler_config["_use_default_values"] assert scheduler_config == new_scheduler.config def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) timestep_0 = 1 timestep_1 = 0 for 
scheduler_class in self.scheduler_classes: if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): timestep_0 = float(timestep_0) timestep_1 = float(timestep_1) scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) if scheduler_class == VQDiffusionScheduler: num_vec_classes = scheduler_config["num_vec_classes"] sample = self.dummy_sample(num_vec_classes) model = self.dummy_model(num_vec_classes) residual = model(sample, timestep_0) else: sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", 50) timestep = 0 if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler: timestep = 1 for scheduler_class in self.scheduler_classes: if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): timestep = float(timestep) scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
timestep = scheduler.sigma_to_t(scheduler.config.sigma_max) if scheduler_class == VQDiffusionScheduler: num_vec_classes = scheduler_config["num_vec_classes"] sample = self.dummy_sample(num_vec_classes) model = self.dummy_model(num_vec_classes) residual = model(sample, timestep) else: sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # Set the seed before `step()` as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # Set the seed before `step()` as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): kwargs["generator"] = torch.manual_seed(0) outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple, outputs_dict) def test_scheduler_public_api(self): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) if scheduler_class != VQDiffusionScheduler: self.assertTrue( hasattr(scheduler, "init_noise_sigma"), f"{scheduler_class} does not implement a required attribute `init_noise_sigma`", ) self.assertTrue( hasattr(scheduler, "scale_model_input"), ( f"{scheduler_class} does not implement a required class method `scale_model_input(sample," " timestep)`" ), ) self.assertTrue( hasattr(scheduler, "step"), f"{scheduler_class} does not implement a required class method `step(...)`", ) if scheduler_class != VQDiffusionScheduler: sample = self.dummy_sample if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be in timestep schedule. scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max) else: scaled_sample = scheduler.scale_model_input(sample, 0.0) self.assertEqual(sample.shape, scaled_sample.shape) def test_add_noise_device(self): for scheduler_class in self.scheduler_classes: if scheduler_class == IPNDMScheduler: continue scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(100) sample = self.dummy_sample.to(torch_device) if scheduler_class == CMStochasticIterativeScheduler: # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max) else: scaled_sample = scheduler.scale_model_input(sample, 0.0) self.assertEqual(sample.shape, scaled_sample.shape) noise = torch.randn_like(scaled_sample).to(torch_device) t = scheduler.timesteps[5][None] noised = scheduler.add_noise(scaled_sample, noise, t) self.assertEqual(noised.shape, scaled_sample.shape) def test_deprecated_kwargs(self): for scheduler_class in self.scheduler_classes: has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated" " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if" " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated" " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`" f" argument to {scheduler_class}.__init__ if there are deprecated arguments or remove the" " deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`" ) def test_trained_betas(self): for scheduler_class in self.scheduler_classes: if scheduler_class in (VQDiffusionScheduler, CMStochasticIterativeScheduler): continue scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, trained_betas=np.array([0.1, 0.3])) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_pretrained(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) assert scheduler.betas.tolist() == new_scheduler.betas.tolist() def test_getattr_is_correct(self): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) # save some things to test scheduler.dummy_attribute = 5 scheduler.register_to_config(test_attribute=5) logger = logging.get_logger("diffusers.configuration_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(scheduler, "dummy_attribute") assert getattr(scheduler, "dummy_attribute") == 5 assert scheduler.dummy_attribute == 5 # no warning should be thrown assert cap_logger.out == "" logger = logging.get_logger("diffusers.schedulers.scheduling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(scheduler, "save_pretrained") fn = scheduler.save_pretrained fn_1 = getattr(scheduler, "save_pretrained") assert fn == fn_1 # no warning should be thrown assert cap_logger.out == "" # warning should be thrown with self.assertWarns(FutureWarning): assert scheduler.test_attribute == 5 with self.assertWarns(FutureWarning): assert getattr(scheduler, "test_attribute") == 5 with self.assertRaises(AttributeError) as error: scheduler.does_not_exist assert str(error.exception) == f"'{type(scheduler).__name__}' object has no attribute 'does_not_exist'" @is_staging_test class SchedulerPushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-scheduler-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def 
test_push_to_hub(self): scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) scheduler.push_to_hub(self.repo_id, token=TOKEN) scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}") assert type(scheduler) == type(scheduler_loaded) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_config with tempfile.TemporaryDirectory() as tmp_dir: scheduler.save_config(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}") assert type(scheduler) == type(scheduler_loaded) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) def test_push_to_hub_in_organization(self): scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) scheduler.push_to_hub(self.org_repo_id, token=TOKEN) scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id) assert type(scheduler) == type(scheduler_loaded) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_config with tempfile.TemporaryDirectory() as tmp_dir: scheduler.save_config(tmp_dir, repo_id=self.org_repo_id, push_to_hub=True, token=TOKEN) scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id) assert type(scheduler) == type(scheduler_loaded) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id)
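# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): the pattern that
# the config tests above exercise. `from_config` carries over every matching
# config value and falls back to the new class's defaults for the rest, so
# swapping schedulers on a loaded pipeline is a two-liner. The checkpoint id
# is the same tiny test pipeline used above; the helper name is hypothetical.
def _scheduler_swap_sketch():
    from diffusers import DiffusionPipeline, EulerDiscreteScheduler

    pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
    # Unspecified values (e.g. `timestep_spacing`) fall back to Euler's defaults.
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe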
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddpm.py
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class DDPMSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDPMScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. 
predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ): scheduler.set_timesteps(timesteps=timesteps) def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) t_start = num_trained_timesteps - 2 model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 387.9466) < 1e-2, f"expected result sum 387.9466, but got {result_sum}" assert abs(result_mean.item() - 0.5051) < 1e-3, f"expected result mean 0.5051, but got {result_mean}"
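# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): DDPM's `step` is
# ancestral, i.e. it draws fresh noise each call, so the full-loop
# expectations above are only reproducible because a seeded `generator` is
# threaded through. Shapes and the residual below are placeholders.
def _ddpm_seeded_step_sketch():
    import torch
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1000)
    sample = torch.ones(1, 3, 8, 8)
    residual = 0.1 * sample
    generator = torch.manual_seed(0)  # fixes the variance noise draw inside `step`
    return scheduler.step(residual, 999, sample, generator=generator).prev_sample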
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ipndm.py
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class IPNDMSchedulerTest(SchedulerCommonTest): scheduler_classes = (IPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = {"num_train_timesteps": 1000} config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] if time_step is None: time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.ets = dummy_past_residuals[:] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) 
num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample scheduler._step_index = None for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] scheduler.ets = dummy_past_residuals[:] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps, time_step=None) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 2540529) < 10
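# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): IPNDM is a
# multistep method, so `scheduler.ets` carries a short history of past model
# outputs. A freshly constructed scheduler has an empty history, which is why
# the tests above warm-start `ets` with dummy residuals before calling
# `step`. The residual values here are placeholders.
def _ipndm_warm_start_sketch():
    import torch
    from diffusers import IPNDMScheduler

    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)  # must run before seeding the history
    residual = 0.1 * torch.ones(1, 3, 8, 8)
    scheduler.ets = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
    return scheduler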
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_multi.py
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): scheduler_classes = (DPMSolverMultistepScheduler,) forward_default_kwargs = (("num_inference_steps", 25),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lower_order_final": False, "euler_at_final": False, "lambda_min_clipped": -float("inf"), "variance_type": None, } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output, new_output = sample, sample for t in range(time_step, time_step + scheduler.config.solver_order + 1): t = new_scheduler.timesteps[t] output = scheduler.step(residual, t, output, **kwargs).prev_sample new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] time_step = new_scheduler.timesteps[time_step] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, scheduler=None, **config): if scheduler is None: 
scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_thresholding(self): self.check_over_configs(thresholding=False) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, ) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_solver_order_and_type(self): for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: if algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: if order == 3: continue else: self.check_over_configs( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) sample = self.full_loop( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, ) assert not torch.isnan(sample).any(), "Samples have nan numbers" def test_lower_order_final(self): self.check_over_configs(lower_order_final=True) self.check_over_configs(lower_order_final=False) def test_euler_at_final(self): self.check_over_configs(euler_at_final=True) self.check_over_configs(euler_at_final=False) def test_lambda_min_clipped(self): self.check_over_configs(lambda_min_clipped=-float("inf")) self.check_over_configs(lambda_min_clipped=-5.1) def test_variance_type(self): self.check_over_configs(variance_type=None) self.check_over_configs(variance_type="learned_range") def test_inference_steps(self): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: 
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 5 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 318.4111) < 1e-2, f"expected result sum 318.4111, but got {result_sum}" assert abs(result_mean.item() - 0.4146) < 1e-3, f"expected result mean 0.4146, but got {result_mean}" def test_full_loop_no_noise_thres(self): sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 1.1364) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2251) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2096) < 1e-3 def test_full_loop_with_lu_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_lu_lambdas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1554) < 1e-3 def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) scheduler = UniPCMultistepScheduler.from_config(scheduler.config) scheduler = DEISMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.3301) < 1e-3 def test_fp16_support(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter.half() scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample assert sample.dtype == torch.float16 def test_duplicated_timesteps(self, **config): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(scheduler.config.num_train_timesteps) assert len(scheduler.timesteps) == 
scheduler.num_inference_steps
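# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): the Karras and
# Lu variants exercised by `full_loop` above differ only in construction
# flags; enabling one reshapes the sigma schedule that `set_timesteps`
# produces. The step count is a placeholder.
def _dpm_karras_sigmas_sketch():
    from diffusers import DPMSolverMultistepScheduler

    scheduler = DPMSolverMultistepScheduler(
        num_train_timesteps=1000,
        use_karras_sigmas=True,  # resample sigmas on the Karras et al. (2022) grid
    )
    scheduler.set_timesteps(10)
    return scheduler.timesteps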
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim_inverse.py
import torch from diffusers import DDIMInverseScheduler from .test_schedulers import SchedulerCommonTest class DDIMInverseSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDIMInverseScheduler,) forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50)) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample return sample def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801])) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_add_noise_device(self): pass def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 671.6816) < 1e-2 assert abs(result_mean.item() - 0.8746) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 1394.2185) < 1e-2 assert abs(result_mean.item() - 1.8154) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = 
torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 539.9622) < 1e-2 assert abs(result_mean.item() - 0.7031) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 542.6722) < 1e-2 assert abs(result_mean.item() - 0.7066) < 1e-3
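# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): the inverse
# scheduler walks timesteps in ascending order (image -> noise), the mirror
# of DDIMScheduler's descending schedule, which is exactly what
# `test_steps_offset` above asserts.
def _ddim_inverse_direction_sketch():
    from diffusers import DDIMInverseScheduler

    scheduler = DDIMInverseScheduler(num_train_timesteps=1000, steps_offset=1)
    scheduler.set_timesteps(5)
    return scheduler.timesteps  # tensor([  1, 201, 401, 601, 801])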
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim.py
import torch from diffusers import DDIMScheduler from .test_schedulers import SchedulerCommonTest class DDIMSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDIMScheduler,) forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50)) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample return sample def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1])) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_timestep_spacing(self): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=timestep_spacing) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_eta(self): for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]): self.check_over_forward(time_step=t, eta=eta) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): sample = 
self.full_loop() result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 172.0067) < 1e-2 assert abs(result_mean.item() - 0.223967) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 52.5302) < 1e-2 assert abs(result_mean.item() - 0.0684) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 149.8295) < 1e-2 assert abs(result_mean.item() - 0.1951) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 149.0784) < 1e-2 assert abs(result_mean.item() - 0.1941) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps, eta = 10, 0.0 t_start = 8 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 354.5418) < 1e-2, f"expected result sum 354.5418, but got {result_sum}" assert abs(result_mean.item() - 0.4616) < 1e-3, f"expected result mean 0.4616, but got {result_mean}"
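# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test file): `eta` scales the
# stochastic part of a DDIM step; `eta=0.0`, as in `full_loop` above, makes
# sampling fully deterministic. Shapes and the residual are placeholders.
def _ddim_eta_sketch():
    import torch
    from diffusers import DDIMScheduler

    scheduler = DDIMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8)
    residual = 0.1 * sample
    t = scheduler.timesteps[0]
    # With eta > 0 a seeded `generator` would be needed for reproducibility.
    return scheduler.step(residual, t, sample, eta=0.0).prev_sample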
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_flax.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest from typing import Dict, List, Tuple from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from jax import random jax_device = jax.default_backend() @require_flax class FlaxSchedulerCommonTest(unittest.TestCase): scheduler_classes = () forward_default_kwargs = () @property def dummy_sample(self): batch_size = 4 num_channels = 3 height = 8 width = 8 key1, key2 = random.split(random.PRNGKey(0)) sample = random.uniform(key1, (batch_size, num_channels, height, width)) return sample, key2 @property def dummy_sample_deter(self): batch_size = 4 num_channels = 3 height = 8 width = 8 num_elems = batch_size * num_channels * height * width sample = jnp.arange(num_elems) sample = sample.reshape(num_channels, height, width, batch_size) sample = sample / num_elems return jnp.transpose(sample, (3, 0, 1, 2)) def get_scheduler_config(self): raise NotImplementedError def dummy_model(self): def model(sample, t, *args): return sample * t / (t + 1) return model def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps 
is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, key = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. 
Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, key = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def test_deprecated_kwargs(self): for scheduler_class in self.scheduler_classes: has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated" " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if" " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated" " kwargs under the `_deprecated_kwargs` class attribute. 
Make sure to either add the `**kwargs`" f" argument to {scheduler_class}.__init__ if there are deprecated arguments or remove the" " deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`" ) @require_flax class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxDDPMScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter key1, key2 = random.split(random.PRNGKey(0)) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2.
predict previous mean of sample x_t-1 output = scheduler.step(state, residual, t, sample, key1) pred_prev_sample = output.prev_sample state = output.state key1, key2 = random.split(key2) # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 255.0714) < 1e-2 assert abs(result_mean - 0.332124) < 1e-3 else: assert abs(result_sum - 255.1113) < 1e-2 assert abs(result_mean - 0.332176) < 1e-3 @require_flax class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxDDIMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() key1, key2 = random.split(random.PRNGKey(0)) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter state = scheduler.set_timesteps(state, num_inference_steps) for t in state.timesteps: residual = model(sample, t) output = scheduler.step(state, residual, t, sample) sample = output.prev_sample state = output.state key1, key2 = random.split(key2) return sample def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, 
"set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, 5) assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all() def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 
0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) assert abs(result_sum - 172.0067) < 1e-2 assert abs(result_mean - 0.223967) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 149.8409) < 1e-2 assert abs(result_mean - 0.1951) < 1e-3 else: assert abs(result_sum - 149.8295) < 1e-2 assert abs(result_mean - 0.1951) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": pass # FIXME: both result_sum and result_mean are nan on TPU # assert jnp.isnan(result_sum) # assert jnp.isnan(result_mean) else: assert abs(result_sum - 149.0784) < 1e-2 assert abs(result_mean - 0.1941) < 1e-3 def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) @require_flax class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample, _ = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals state = state.replace(ets=dummy_past_residuals[:]) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals new_state = new_state.replace(ets=dummy_past_residuals[:]) (prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs) (new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical" output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs) new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def 
recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample, _ = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals (must be after setting timesteps) state = state.replace(ets=dummy_past_residuals[:]) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape) # copy over dummy past residual (must be after setting timesteps) new_state = new_state.replace(ets=dummy_past_residuals[:]) output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs) new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs) new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output
- new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) for i, t in enumerate(state.prk_timesteps): residual = model(sample, t) sample, state = scheduler.step_prk(state, residual, t, sample) for i, t in enumerate(state.plms_timesteps): residual = model(sample, t) sample, state = scheduler.step_plms(state, residual, t, sample) return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) state = state.replace(ets=dummy_past_residuals[:]) output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs) output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs) self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs) output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs) self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, 10, shape=()) assert jnp.equal( state.timesteps, jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]), ).all() def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_time_indices(self): for t in [1, 5, 10]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps) def test_pow_of_3_inference_steps(self): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 num_inference_steps = 27 for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = 
scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(state.prk_timesteps[:2]): sample, state = scheduler.step_prk(state, residual, t, sample) def test_inference_plms_no_past_residuals(self): with self.assertRaises(ValueError): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() scheduler.step_plms(state, self.dummy_sample, 1, self.dummy_sample).prev_sample def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 198.1275) < 1e-2 assert abs(result_mean - 0.2580) < 1e-3 else: assert abs(result_sum - 198.1318) < 1e-2 assert abs(result_mean - 0.2580) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 186.83226) < 1e-2 assert abs(result_mean - 0.24327) < 1e-3 else: assert abs(result_sum - 186.9466) < 1e-2 assert abs(result_mean - 0.24342) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 186.83226) < 1e-2 assert abs(result_mean - 0.24327) < 1e-3 else: assert abs(result_sum - 186.9482) < 1e-2 assert abs(result_mean - 0.2434) < 1e-3
0
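The Flax tests above all revolve around one functional pattern: Flax schedulers hold no mutable state, so `create_state()`, `set_timesteps()`, and `step()` thread an explicit state value through every call. Below is a minimal sketch of that loop, not part of the test file; the shapes and the `sample * t / (t + 1)` arithmetic model are illustrative stand-ins taken from the dummies above, and exact signatures may differ across diffusers versions.

import jax

from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()            # immutable state pytree, never mutated in place
state = scheduler.set_timesteps(state, 10)  # returns a *new* state

key = jax.random.PRNGKey(0)
sample = jax.random.uniform(key, (4, 3, 8, 8))  # toy latent, same shape as dummy_sample

for t in state.timesteps:
    residual = sample * t / (t + 1)  # stand-in for a real denoising model call
    key, step_key = jax.random.split(key)
    output = scheduler.step(state, residual, t, sample, step_key)
    sample, state = output.prev_sample, output.state  # carry the new state forward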
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_consistency_model.py
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class CMStochasticIterativeSchedulerTest(SchedulerCommonTest): scheduler_classes = (CMStochasticIterativeScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**kwargs) return config # Override test_step_shape to add CMStochasticIterativeScheduler-specific logic regarding timesteps # Problem is that we don't know two timesteps that will always be in the timestep schedule from only the scheduler # config; scaled sigma_max is always in the timestep schedule, but sigma_min is in the sigma schedule while scaled # sigma_min is not in the timestep schedule def test_step_shape(self): num_inference_steps = 10 scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timestep_0 = scheduler.timesteps[0] timestep_1 = scheduler.timesteps[1] sample = self.dummy_sample residual = 0.1 * sample output_0 = scheduler.step(residual, timestep_0, sample).prev_sample output_1 = scheduler.step(residual, timestep_1, sample).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_clip_denoised(self): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=clip_denoised) def test_full_loop_no_noise_onestep(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 1 scheduler.set_timesteps(num_inference_steps) timesteps = scheduler.timesteps generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(timesteps): # 1. scale model input scaled_sample = scheduler.scale_model_input(sample, t) # 2. predict noise residual residual = model(scaled_sample, t) # 3. predict previous sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 192.7614) < 1e-2 assert abs(result_mean.item() - 0.2510) < 1e-3 def test_full_loop_no_noise_multistep(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [106, 0] scheduler.set_timesteps(timesteps=timesteps) timesteps = scheduler.timesteps generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input scaled_sample = scheduler.scale_model_input(sample, t) # 2. predict noise residual residual = model(scaled_sample, t) # 3. 
predict previous sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 347.6357) < 1e-2 assert abs(result_mean.item() - 0.4527) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 8 scheduler.set_timesteps(num_inference_steps) timesteps = scheduler.timesteps generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. scale model input scaled_sample = scheduler.scale_model_input(sample, t) # 2. predict noise residual residual = model(scaled_sample, t) # 3. predict previous sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 763.9186) < 1e-2, f" expected result sum 763.9186, but get {result_sum}" assert abs(result_mean.item() - 0.9947) < 1e-3, f" expected result mean 0.9947, but get {result_mean}" def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [39, 30, 12, 15, 0] with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [39, 30, 12, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ): scheduler.set_timesteps(timesteps=timesteps)
0
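The consistency-model tests exercise `set_timesteps(timesteps=...)`, which takes an explicit, strictly descending timestep list instead of a step count. A minimal sketch of the two-step schedule from `test_full_loop_no_noise_multistep`, with `0.1 * scaled` as a toy stand-in for the model:

import torch

from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # explicit descending schedule

generator = torch.manual_seed(0)
sample = torch.randn(4, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)  # consistency-model preconditioning
    residual = 0.1 * scaled                          # stand-in for the model
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample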
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_euler.py
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class EulerDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (EulerDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 10.0807) < 1e-2 assert abs(result_mean.item() - 0.0131) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 0.0002) < 1e-2 assert abs(result_mean.item() - 2.2676e-06) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 10.0807) < 1e-2 assert abs(result_mean.item() - 0.0131) < 1e-3 def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = 
self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 124.52299499511719) < 1e-2 assert abs(result_mean.item() - 0.16213932633399963) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma # add noise t_start = self.num_inference_steps - 2 noise = self.dummy_noise_deter noise = noise.to(sample.device) timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 57062.9297) < 1e-2, f" expected result sum 57062.9297, but get {result_sum}" assert abs(result_mean.item() - 74.3007) < 1e-3, f" expected result mean 74.3007, but get {result_mean}"
0
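`test_full_loop_device_karras_sigmas` toggles `use_karras_sigmas=True`, which re-spaces the sigma schedule following Karras et al. (2022) while leaving the rest of the sampling loop unchanged. A hedged sketch of that loop; the `0.1 * sample` residual is an illustrative stand-in for a UNet call:

import torch

from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    use_karras_sigmas=True,  # re-spaced sigma schedule; the loop below is unchanged
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(4, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sample = scheduler.scale_model_input(sample, t)
    model_output = 0.1 * sample  # stand-in for a UNet call
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample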
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_heun.py
import torch from diffusers import HeunDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class HeunDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (HeunDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear", "exp"]: self.check_over_configs(beta_schedule=schedule) def test_clip_sample(self): for clip_sample_range in [1.0, 2.0, 3.0]: self.check_over_configs(clip_sample_range=clip_sample_range, clip_sample=True) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction", "sample"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 0.1233) < 1e-2 assert abs(result_mean.item() - 0.0002) < 1e-3 else: # CUDA assert abs(result_sum.item() - 0.1233) < 1e-2 assert abs(result_mean.item() - 0.0002) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934e-07) < 1e-2 assert abs(result_mean.item() - 6.1112e-10) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2 assert abs(result_mean.item() - 0.0002) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = 
torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if str(torch_device).startswith("cpu"): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 0.1233) < 1e-2 assert abs(result_mean.item() - 0.0002) < 1e-3 elif str(torch_device).startswith("mps"): # Larger tolerance on mps assert abs(result_mean.item() - 0.0002) < 1e-2 else: # CUDA assert abs(result_sum.item() - 0.1233) < 1e-2 assert abs(result_mean.item() - 0.0002) < 1e-3 def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 0.00015) < 1e-2 assert abs(result_mean.item() - 1.9869554535034695e-07) < 1e-2 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) t_start = self.num_inference_steps - 2 noise = self.dummy_noise_deter noise = noise.to(torch_device) timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 75074.8906) < 1e-2, f" expected result sum 75074.8906, but get {result_sum}" assert abs(result_mean.item() - 97.7538) < 1e-3, f" expected result mean 97.7538, but get {result_mean}"
0
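`test_full_loop_with_noise` above shows the pattern for resuming denoising mid-schedule: `add_noise` pushes a sample to an intermediate timestep, and the loop then runs only the tail of the schedule. Note the `t_start * scheduler.order` indexing, since Heun duplicates timesteps (its order is 2). A minimal sketch with toy tensors standing in for real latents and model outputs:

import torch

from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
num_inference_steps = 10
scheduler.set_timesteps(num_inference_steps)

sample = torch.zeros(4, 3, 8, 8)  # toy "clean" sample
noise = torch.randn(4, 3, 8, 8)

t_start = num_inference_steps - 2
timesteps = scheduler.timesteps[t_start * scheduler.order :]  # order == 2 for Heun
sample = scheduler.add_noise(sample, noise, timesteps[:1])    # jump to timesteps[0]

for t in timesteps:
    sample = scheduler.scale_model_input(sample, t)
    model_output = 0.1 * sample  # stand-in model
    sample = scheduler.step(model_output, t, sample).prev_sample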
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_unipc.py
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UniPCMultistepSchedulerTest(SchedulerCommonTest): scheduler_classes = (UniPCMultistepScheduler,) forward_default_kwargs = (("num_inference_steps", 25),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output, new_output = sample, sample for t in range(time_step, time_step + scheduler.config.solver_order + 1): t = scheduler.timesteps[t] output = scheduler.step(residual, t, output, **kwargs).prev_sample new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) # copy over dummy past residuals (must be after setting timesteps) scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_scheduler.set_timesteps(num_inference_steps) # copy over dummy past residual (must be after setting timesteps) new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, scheduler=None, **config): if scheduler is None: scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model =
self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): scheduler.set_timesteps(num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] time_step_0 = scheduler.timesteps[5] time_step_1 = scheduler.timesteps[6] output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = UniPCMultistepScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2464) < 1e-3 scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) scheduler = DEISMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) scheduler = UniPCMultistepScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2464) < 1e-3 def test_timesteps(self): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_thresholding(self): self.check_over_configs(thresholding=False) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, ) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_solver_order_and_type(self): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, ) sample = self.full_loop( solver_order=order, solver_type=solver_type, prediction_type=prediction_type, ) assert not torch.isnan(sample).any(), "Samples have nan numbers" def test_lower_order_final(self): self.check_over_configs(lower_order_final=True) self.check_over_configs(lower_order_final=False) def test_inference_steps(self): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = 
torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2464) < 1e-3 def test_full_loop_with_karras(self): sample = self.full_loop(use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2925) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1014) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1966) < 1e-3 def test_fp16_support(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter.half() scheduler.set_timesteps(num_inference_steps) for i, t in enumerate(scheduler.timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample assert sample.dtype == torch.float16 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 8 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 315.5757) < 1e-2, f" expected result sum 315.5757, but get {result_sum}" assert abs(result_mean.item() - 0.4109) < 1e-3, f" expected result mean 0.4109, but get {result_mean}" class UniPCMultistepScheduler1DTest(UniPCMultistepSchedulerTest): @property def dummy_sample(self): batch_size = 4 num_channels = 3 width = 8 sample = torch.rand((batch_size, num_channels, width)) return sample @property def dummy_noise_deter(self): batch_size = 4 num_channels = 3 width = 8 num_elems = batch_size * num_channels * width sample = torch.arange(num_elems).flip(-1) sample = sample.reshape(num_channels, width, batch_size) sample = sample / num_elems sample = sample.permute(2, 0, 1) return sample @property def dummy_sample_deter(self): batch_size = 4 num_channels = 3 width = 8 num_elems = batch_size * num_channels * width sample = torch.arange(num_elems) sample = sample.reshape(num_channels, width, batch_size) sample = sample / num_elems sample = sample.permute(2, 0, 1) return sample def test_switch(self): # make sure that iterating over schedulers with same config names gives same results # for defaults scheduler = UniPCMultistepScheduler(**self.get_scheduler_config()) sample = self.full_loop(scheduler=scheduler) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2441) < 1e-3 scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) scheduler = DEISMultistepScheduler.from_config(scheduler.config) scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) scheduler = UniPCMultistepScheduler.from_config(scheduler.config) sample = self.full_loop(scheduler=scheduler) result_mean = 
torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2441) < 1e-3 def test_full_loop_no_noise(self): sample = self.full_loop() result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2441) < 1e-3 def test_full_loop_with_karras(self): sample = self.full_loop(use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.2898) < 1e-3 def test_full_loop_with_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction") result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1014) < 1e-3 def test_full_loop_with_karras_and_v_prediction(self): sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) result_mean = torch.mean(torch.abs(sample)) assert abs(result_mean.item() - 0.1944) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_inference_steps = 10 t_start = 8 model = self.dummy_model() sample = self.dummy_sample_deter scheduler.set_timesteps(num_inference_steps) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 39.0870) < 1e-2, f" expected result sum 39.0870, but get {result_sum}" assert abs(result_mean.item() - 0.4072) < 1e-3, f" expected result mean 0.4072, but get {result_mean}"
0
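`test_switch` relies on the fact that the multistep solvers in this family share a config schema, so `from_config` can rebuild one scheduler type from another's saved config without touching the rest of a pipeline. A short sketch of that round trip, using the config values from the test above:

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    UniPCMultistepScheduler,
)

unipc = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
dpm = DPMSolverMultistepScheduler.from_config(unipc.config)  # reinterpret the same config
deis = DEISMultistepScheduler.from_config(dpm.config)
unipc_again = UniPCMultistepScheduler.from_config(deis.config)  # full circle
assert unipc_again.config.num_train_timesteps == 1000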
hf_public_repos/diffusers/tests/fixtures
hf_public_repos/diffusers/tests/fixtures/custom_pipeline/pipeline.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict`
            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
            images.
        """
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
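A brief aside for readers of this fixture: custom pipelines like the one above are normally loaded through `DiffusionPipeline.from_pretrained` with its `custom_pipeline` argument. The sketch below is illustrative only; the base checkpoint and local folder name are assumptions, not part of this fixture:

```python
# Hypothetical usage sketch for a local custom pipeline (the folder name is made up):
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",             # any checkpoint that provides `unet` and `scheduler`
    custom_pipeline="./custom_pipeline",  # local folder containing the pipeline.py above
)

output, message = pipe(num_inference_steps=50)
print(message)            # "This is a local test", the extra value this custom __call__ returns
image = output.images[0]  # PIL image from the ImagePipelineOutput
```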
hf_public_repos/diffusers/tests/fixtures
hf_public_repos/diffusers/tests/fixtures/custom_pipeline/what_ever.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
hf_public_repos/diffusers
hf_public_repos/diffusers/docs/README.md
<!---
Copyright 2023- The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the docs; you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

Then you need to install our open source documentation builder tool:

```bash
pip install git+https://github.com/huggingface/doc-builder
```

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to commit the built documentation.

---

## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview {package_name} {path_to_docs}
```

For example:

```bash
doc-builder preview diffusers docs/source/en
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment linking to a page where the documentation with your changes lives.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md).

Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media, and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.

So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:

```md
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```

and of course, if you moved it to another file, then:

```md
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich moved section set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).

## Writing Documentation - Specification

The `huggingface/diffusers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new Markdown (.md) file under `docs/source/<languageCode>`.
- Link that file in `docs/source/<languageCode>/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four.

### Adding a new pipeline/scheduler

When adding a new pipeline:

- Create a file `xxx.md` under `docs/source/<languageCode>/api/pipelines` (don't hesitate to copy an existing file as a template).
- Link that file in the (*Diffusers Summary*) section in `docs/source/api/pipelines/overview.md`, along with the link to the paper, and a colab notebook (if available).
- Write a short overview of the diffusion model:
    - Overview with paper & authors
    - Paper abstract
    - Tips and tricks and how to use it best
    - Possibly an end-to-end example of how to use it
- Add all the pipeline classes that should be linked in the diffusion model. These classes should be added using our Markdown syntax. By default as follows:

```
[[autodoc]] XXXPipeline
    - all
    - __call__
```

This will include every public method of the pipeline that is documented, as well as the `__call__` method that is not documented by default. If you just want to add additional methods that are not documented, you can put the list of all methods to add in a list that contains `all`.

```
[[autodoc]] XXXPipeline
    - all
    - __call__
    - enable_attention_slicing
    - disable_attention_slicing
    - enable_xformers_memory_efficient_attention
    - disable_xformers_memory_efficient_attention
```

You can follow the same process to create a new scheduler under the `docs/source/<languageCode>/api/schedulers` folder.

### Writing source documentation

Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None, or any strings should usually be put in `code`.

When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.

If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`pipelines.ImagePipelineOutput\`\]. This will be converted into a link with
`pipelines.ImagePipelineOutput` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~pipelines.ImagePipelineOutput\`\] will generate a link with `ImagePipelineOutput` in the description.

The same works for methods, so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].

#### Defining arguments in a method

Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
description:

```
    Args:
        n_layers (`int`): The number of layers of the model.
```

If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.

Here's an example showcasing everything so far:

```
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
            [`~PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
```

For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
following signature:

```py
def my_function(x: str=None, a: float=3.14):
```

then its documentation should look like this:

```
    Args:
        x (`str`, *optional*):
            This argument controls ...
        a (`float`, *optional*, defaults to `3.14`):
            This argument is used to ...
```

Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it into several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:

````
```
# first line of code
# second line
# etc
```
````

#### Writing a return block

The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.

Here's an example of a single value return:

```
    Returns:
        `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

Here's an example of a tuple return, comprising several objects:

```
    Returns:
        `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
        - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
          Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
          Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

#### Adding an image

Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.

## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
easily.
hf_public_repos/diffusers
hf_public_repos/diffusers/docs/TRANSLATING.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

### Translating the Diffusers documentation into your language

As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏.

**🗞️ Open an issue**

To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "🌐 Translating a New Language?" option from the "New issue" button.

Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.

**🍴 Fork the repository**

First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.

Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:

```bash
git clone https://github.com/<YOUR-USERNAME>/diffusers.git
```

**📋 Copy-paste the English version with a new language code**

The documentation files all live in a single top-level directory:

- [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language.

You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:

```bash
cd ~/path/to/diffusers/docs
cp -r source/en source/<LANG-ID>
```

Here, `<LANG-ID>` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.

**✍️ Start translating**

Now comes the fun part - translating the text! The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.

> 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/<LANG-ID>/` directory!

The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml):

```yaml
- sections:
  - local: pipeline_tutorial # Do not change this! Use the same name for your .md file
    title: Pipelines for inference # Translate this!
  ...
  title: Tutorials # Translate this!
```

Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.

> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
hf_public_repos/diffusers/docs
hf_public_repos/diffusers/docs/source/_config.py
# docstyle-ignore
INSTALL_CONTENT = """
# Diffusers installation
! pip install diffusers transformers datasets accelerate
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/diffusers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/in_translation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Translation in progress

We are hard at work translating this page. Please check back a little later. Thank you!
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/installation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Installation

Install 🤗 Diffusers for whichever deep learning library you're working with. 🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the library you are using:

- [PyTorch installation instructions](https://pytorch.org/get-started/locally/)
- [Flax installation instructions](https://flax.readthedocs.io/en/latest/)

## Install with pip

You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment:

```bash
source .env/bin/activate
```

Now you're ready to install 🤗 Diffusers with the following command:

**For PyTorch**

```bash
pip install diffusers["torch"]
```

**For Flax**

```bash
pip install diffusers["flax"]
```

## Install from source

Before installing `diffusers` from source, make sure you have `torch` and `accelerate` installed.

For `torch` installation, refer to the [torch docs](https://pytorch.org/get-started/locally/#start-locally).

Install `accelerate` as follows:

```bash
pip install accelerate
```

Then install 🤗 Diffusers from source with the following command:

```bash
pip install git+https://github.com/huggingface/diffusers
```

This command installs the bleeding-edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up to date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!

## Editable install

You will need an editable install if you'd like to:

* Use the `main` version of the source code.
* Contribute to 🤗 Diffusers (needed to test changes in the code).

Clone the repository and install 🤗 Diffusers with the following commands:

```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```

**For PyTorch**

```
pip install -e ".[torch]"
```

**For Flax**

```
pip install -e ".[flax]"
```

These commands link the folder you cloned the repository to with your Python library paths. Python will now look inside the folder you cloned to, in addition to the normal library paths. For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.

<Tip warning={true}>

You must keep the `diffusers` folder if you want to keep using the library.

</Tip>

Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:

```bash
cd ~/diffusers/
git pull
```

Your Python environment will find the `main` version of 🤗 Diffusers on the next run.

## Notice on telemetry logging

Our library gathers telemetry information during `from_pretrained()` requests. This data includes the version of Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the HuggingFace Hub, and is not collected during local usage.

We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:

On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```

On Windows:
```bash
set DISABLE_TELEMETRY=YES
```
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/_toctree.yml
- sections:
  - local: index
    title: "🧨 Diffusers"
  - local: quicktour
    title: "Quick tour"
  - local: stable_diffusion
    title: Stable Diffusion
  - local: installation
    title: "Installation"
  title: "Get started"
- sections:
  - local: tutorials/tutorial_overview
    title: Overview
  - local: using-diffusers/write_own_pipeline
    title: Understanding models and schedulers
  - local: in_translation
    title: AutoPipeline
  - local: tutorials/basic_training
    title: Train a diffusion model
  title: Tutorials
- sections:
  - sections:
    - local: using-diffusers/loading_overview
      title: Overview
    - local: using-diffusers/loading
      title: Load pipelines, models, and schedulers
    - local: using-diffusers/schedulers
      title: Load and compare different schedulers
    - local: using-diffusers/custom_pipeline_overview
      title: Load community pipelines
    - local: using-diffusers/using_safetensors
      title: Load safetensors
    - local: using-diffusers/other-formats
      title: Load different Stable Diffusion formats
    - local: in_translation
      title: Push files to the Hub
    title: Loading & Hub
  - sections:
    - local: using-diffusers/pipeline_overview
      title: Overview
    - local: using-diffusers/unconditional_image_generation
      title: Unconditional image generation
    - local: using-diffusers/conditional_image_generation
      title: Text-to-image generation
    - local: using-diffusers/img2img
      title: Text-guided image-to-image
    - local: using-diffusers/inpaint
      title: Text-guided image inpainting
    - local: using-diffusers/depth2img
      title: Text-guided depth-to-image
    - local: using-diffusers/textual_inversion_inference
      title: Textual inversion
    - local: training/distributed_inference
      title: Distributed inference with multiple GPUs
    - local: in_translation
      title: Distilled Stable Diffusion inference
    - local: using-diffusers/reusing_seeds
      title: Improve image quality with deterministic generation
    - local: using-diffusers/control_brightness
      title: Control image brightness
    - local: using-diffusers/reproducibility
      title: Create reproducible pipelines
    - local: using-diffusers/custom_pipeline_examples
      title: Community pipelines
    - local: using-diffusers/contribute_pipeline
      title: How to contribute a community pipeline
    - local: using-diffusers/stable_diffusion_jax_how_to
      title: Stable Diffusion in JAX/Flax
    - local: using-diffusers/weighted_prompts
      title: Weighting Prompts
    title: Pipelines for inference
  - sections:
    - local: training/overview
      title: Overview
    - local: training/create_dataset
      title: Create a dataset for training
    - local: training/adapt_a_model
      title: Adapt a model to a new task
    - local: training/unconditional_training
      title: Unconditional image generation
    - local: training/text_inversion
      title: Textual Inversion
    - local: training/dreambooth
      title: DreamBooth
    - local: training/text2image
      title: Text-to-image
    - local: training/lora
      title: Low-Rank Adaptation of Large Language Models (LoRA)
    - local: training/controlnet
      title: ControlNet
    - local: training/instructpix2pix
      title: InstructPix2Pix training
    - local: training/custom_diffusion
      title: Custom Diffusion
    title: Training
  title: Using Diffusers
- sections:
  - local: optimization/opt_overview
    title: Overview
  - local: optimization/fp16
    title: Memory and speed
  - local: optimization/torch2.0
    title: Torch 2.0 support
  - local: optimization/xformers
    title: xFormers
  - local: optimization/onnx
    title: ONNX
  - local: optimization/open_vino
    title: OpenVINO
  - local: optimization/coreml
    title: Core ML
  - local: optimization/mps
    title: MPS
  - local: optimization/habana
    title: Habana Gaudi
  - local: optimization/tome
    title: Token Merging
  title: Optimization/Special hardware
- sections:
  - local: using-diffusers/controlling_generation
    title: Controlled generation
  - local: in_translation
    title: Evaluating Diffusion Models
  title: Conceptual guides
- sections:
  - sections:
    - sections:
      - local: api/pipelines/stable_diffusion/stable_diffusion_xl
        title: Stable Diffusion XL
      title: Stable Diffusion
    title: Pipelines
  title: API
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/stable_diffusion.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Effective and efficient diffusion

[[open-in-colab]]

Getting a [`DiffusionPipeline`] to generate images in a certain style or include exactly what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something from nothing is a computationally intensive process, especially if you're running inference over and over again.

This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline, to reduce the time between inference cycles so you can iterate faster.

This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].

Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:

```python
from diffusers import DiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id)
```

The example prompt used here is "portrait of an old warrior chief", but feel free to use your own:

```python
prompt = "portrait photo of a old warrior chief"
```

## Speed

<Tip>

💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!

</Tip>

One of the simplest ways to speed up inference is to place the pipeline on a GPU, the same way you would with any PyTorch module:

```python
pipeline = pipeline.to("cuda")
```

To make sure you can reuse the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):

```python
import torch

generator = torch.Generator("cuda").manual_seed(0)
```

Now you can generate an image:

```python
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>

This process took ~30 seconds on a T4 GPU (it may be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16`, or by running fewer inference steps.

Let's load the model in `float16` and generate an image:

```python
import torch

pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>

This time, it only took ~11 seconds to generate the image, almost 3x faster than before!

<Tip>

💡 We strongly suggest always running your pipelines in `float16`; so far, we've rarely seen any degradation in output quality.

</Tip>

Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method:

```python
pipeline.scheduler.compatibles
[
    diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
    diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
    diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
    diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
    diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
    diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
    diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
    diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
    diffusers.schedulers.scheduling_pndm.PNDMScheduler,
    diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
    diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```

The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires ~50 inference steps, but more performant schedulers like the [`DPMSolverMultistepScheduler`] require only ~20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler:

```python
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```

Set `num_inference_steps` to 20:

```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>

Great, you've managed to cut the inference time to just 4 seconds! ⚡️

## Memory

The other key to improving pipeline performance is consuming less memory, which indirectly means more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try different batch sizes until you hit an `OutOfMemoryError` (OOM).
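As an illustrative aside that is not part of the original tutorial, the "increase the batch size until OOM" advice above can be scripted. The sketch below assumes the `pipeline` and `prompt` defined earlier and a CUDA device:

```python
import torch

batch_size = 1
while True:
    try:
        pipeline([prompt] * batch_size, num_inference_steps=20)
        print(f"batch_size={batch_size} fits in memory")
        batch_size *= 2  # try a bigger batch on the next round
    except torch.cuda.OutOfMemoryError:
        print(f"OOM at batch_size={batch_size}; the largest working batch was {batch_size // 2}")
        break
```

Note that GPU memory can stay fragmented after an OOM, so in practice it is safest to restart the process before measuring throughput at the batch size you settle on.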
ํ”„๋กฌํ”„ํŠธ ๋ชฉ๋ก๊ณผ `Generators`์—์„œ ์ด๋ฏธ์ง€ ๋ฐฐ์น˜๋ฅผ ์ƒ์„ฑํ•˜๋Š” ํ•จ์ˆ˜๋ฅผ ๋งŒ๋“ญ๋‹ˆ๋‹ค. ์ข‹์€ ๊ฒฐ๊ณผ๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๊ฒฝ์šฐ ์žฌ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ฐ `Generator`์— ์‹œ๋“œ๋ฅผ ํ• ๋‹นํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```python def get_inputs(batch_size=1): generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)] prompts = batch_size * [prompt] num_inference_steps = 20 return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps} ``` ๋˜ํ•œ ๊ฐ ์ด๋ฏธ์ง€ ๋ฐฐ์น˜๋ฅผ ๋ณด์—ฌ์ฃผ๋Š” ๊ธฐ๋Šฅ์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค: ```python from PIL import Image def image_grid(imgs, rows=2, cols=2): w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid ``` `batch_size=4`๋ถ€ํ„ฐ ์‹œ์ž‘ํ•ด ์–ผ๋งˆ๋‚˜ ๋งŽ์€ ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์†Œ๋น„ํ–ˆ๋Š”์ง€ ํ™•์ธํ•ฉ๋‹ˆ๋‹ค: ```python images = pipeline(**get_inputs(batch_size=4)).images image_grid(images) ``` RAM์ด ๋” ๋งŽ์€ GPU๊ฐ€ ์•„๋‹ˆ๋ผ๋ฉด ์œ„์˜ ์ฝ”๋“œ์—์„œ `OOM` ์˜ค๋ฅ˜๊ฐ€ ๋ฐ˜ํ™˜๋˜์—ˆ์„ ๊ฒƒ์ž…๋‹ˆ๋‹ค! ๋Œ€๋ถ€๋ถ„์˜ ๋ฉ”๋ชจ๋ฆฌ๋Š” cross-attention ๋ ˆ์ด์–ด๊ฐ€ ์ฐจ์ง€ํ•ฉ๋‹ˆ๋‹ค. ์ด ์ž‘์—…์„ ๋ฐฐ์น˜๋กœ ์‹คํ–‰ํ•˜๋Š” ๋Œ€์‹  ์ˆœ์ฐจ์ ์œผ๋กœ ์‹คํ–‰ํ•˜๋ฉด ์ƒ๋‹นํ•œ ์–‘์˜ ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์ ˆ์•ฝํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํŒŒ์ดํ”„๋ผ์ธ์„ ๊ตฌ์„ฑํ•˜์—ฌ [`~DiffusionPipeline.enable_attention_slicing`] ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜๊ธฐ๋งŒ ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค: ```python pipeline.enable_attention_slicing() ``` ์ด์ œ `batch_size`๋ฅผ 8๋กœ ๋Š˜๋ ค๋ณด์„ธ์š”! ```python images = pipeline(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png"> </div> ์ด์ „์—๋Š” 4๊ฐœ์˜ ์ด๋ฏธ์ง€๋ฅผ ๋ฐฐ์น˜๋กœ ์ƒ์„ฑํ•  ์ˆ˜๋„ ์—†์—ˆ์ง€๋งŒ, ์ด์ œ๋Š” ์ด๋ฏธ์ง€๋‹น ์•ฝ 3.5์ดˆ ๋งŒ์— 8๊ฐœ์˜ ์ด๋ฏธ์ง€๋ฅผ ๋ฐฐ์น˜๋กœ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ์ด๋Š” ์•„๋งˆ๋„ ํ’ˆ์งˆ ์ €ํ•˜ ์—†์ด T4 GPU์—์„œ ๊ฐ€์žฅ ๋น ๋ฅธ ์†๋„์ผ ๊ฒƒ์ž…๋‹ˆ๋‹ค. ## ํ’ˆ์งˆ ์ง€๋‚œ ๋‘ ์„น์…˜์—์„œ๋Š” `fp16`์„ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ดํ”„๋ผ์ธ์˜ ์†๋„๋ฅผ ์ตœ์ ํ™”ํ•˜๊ณ , ๋” ์„ฑ๋Šฅ์ด ์ข‹์€ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ถ”๋ก  ๋‹จ๊ณ„์˜ ์ˆ˜๋ฅผ ์ค„์ด๊ณ , attention slicing์„ ํ™œ์„ฑํ™”ํ•˜์—ฌ ๋ฉ”๋ชจ๋ฆฌ ์†Œ๋น„๋ฅผ ์ค„์ด๋Š” ๋ฐฉ๋ฒ•์„ ๋ฐฐ์› ์Šต๋‹ˆ๋‹ค. ์ด์ œ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€์˜ ํ’ˆ์งˆ์„ ๊ฐœ์„ ํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•ด ์ง‘์ค‘์ ์œผ๋กœ ์•Œ์•„๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ### ๋” ๋‚˜์€ ์ฒดํฌํฌ์ธํŠธ ๊ฐ€์žฅ ํ™•์‹คํ•œ ๋‹จ๊ณ„๋Š” ๋” ๋‚˜์€ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. Stable Diffusion ๋ชจ๋ธ์€ ์ข‹์€ ์ถœ๋ฐœ์ ์ด๋ฉฐ, ๊ณต์‹ ์ถœ์‹œ ์ดํ›„ ๋ช‡ ๊ฐ€์ง€ ๊ฐœ์„ ๋œ ๋ฒ„์ „๋„ ์ถœ์‹œ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ํ•˜์ง€๋งŒ ์ตœ์‹  ๋ฒ„์ „์„ ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ํ•ด์„œ ์ž๋™์œผ๋กœ ๋” ๋‚˜์€ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ๋Š” ๊ฒƒ์€ ์•„๋‹™๋‹ˆ๋‹ค. ์—ฌ์ „ํžˆ ๋‹ค์–‘ํ•œ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ง์ ‘ ์‹คํ—˜ํ•ด๋ณด๊ณ , [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/) ์‚ฌ์šฉ ๋“ฑ ์•ฝ๊ฐ„์˜ ์กฐ์‚ฌ๋ฅผ ํ†ตํ•ด ์ตœ์ƒ์˜ ๊ฒฐ๊ณผ๋ฅผ ์–ป์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ด ๋ถ„์•ผ๊ฐ€ ์„ฑ์žฅํ•จ์— ๋”ฐ๋ผ ํŠน์ • ์Šคํƒ€์ผ์„ ์—ฐ์ถœํ•  ์ˆ˜ ์žˆ๋„๋ก ์„ธ๋ฐ€ํ•˜๊ฒŒ ์กฐ์ •๋œ ๊ณ ํ’ˆ์งˆ ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์ ์  ๋” ๋งŽ์•„์ง€๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. [Hub](https://huggingface.co/models?library=diffusers&sort=downloads)์™€ [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery)๋ฅผ ๋‘˜๋Ÿฌ๋ณด๊ณ  ๊ด€์‹ฌ ์žˆ๋Š” ๊ฒƒ์„ ์ฐพ์•„๋ณด์„ธ์š”! ### ๋” ๋‚˜์€ ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์„ฑ ์š”์†Œ ํ˜„์žฌ ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ์ตœ์‹  ๋ฒ„์ „์œผ๋กœ ๊ต์ฒดํ•ด ๋ณผ ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. 
Stability AI์˜ ์ตœ์‹  [autodecoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae)๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ์— ๋กœ๋“œํ•˜๊ณ  ๋ช‡ ๊ฐ€์ง€ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค: ```python from diffusers import AutoencoderKL vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda") pipeline.vae = vae images = pipeline(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png"> </div> ### ๋” ๋‚˜์€ ํ”„๋กฌํ”„ํŠธ ์—”์ง€๋‹ˆ์–ด๋ง ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์‚ฌ์šฉํ•˜๋Š” ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋Š” *prompt engineering*์ด๋ผ๊ณ  ํ•  ์ •๋„๋กœ ๋งค์šฐ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ ์—”์ง€๋‹ˆ์–ด๋ง ์‹œ ๊ณ ๋ คํ•ด์•ผ ํ•  ๋ช‡ ๊ฐ€์ง€ ์‚ฌํ•ญ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค: - ์ƒ์„ฑํ•˜๋ ค๋Š” ์ด๋ฏธ์ง€ ๋˜๋Š” ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€๊ฐ€ ์ธํ„ฐ๋„ท์— ์–ด๋–ป๊ฒŒ ์ €์žฅ๋˜์–ด ์žˆ๋Š”๊ฐ€? - ๋‚ด๊ฐ€ ์›ํ•˜๋Š” ์Šคํƒ€์ผ๋กœ ๋ชจ๋ธ์„ ์œ ๋„ํ•˜๊ธฐ ์œ„ํ•ด ์–ด๋–ค ์ถ”๊ฐ€ ์„ธ๋ถ€ ์ •๋ณด๋ฅผ ์ œ๊ณตํ•  ์ˆ˜ ์žˆ๋Š”๊ฐ€? ์ด๋ฅผ ์—ผ๋‘์— ๋‘๊ณ  ์ƒ‰์ƒ๊ณผ ๋” ๋†’์€ ํ’ˆ์งˆ์˜ ๋””ํ…Œ์ผ์„ ํฌํ•จํ•˜๋„๋ก ํ”„๋กฌํ”„ํŠธ๋ฅผ ๊ฐœ์„ ํ•ด ๋ด…์‹œ๋‹ค: ```python prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes" prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta" ``` ์ƒˆ๋กœ์šด ํ”„๋กฌํ”„ํŠธ๋กœ ์ด๋ฏธ์ง€ ๋ฐฐ์น˜๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค: ```python images = pipeline(**get_inputs(batch_size=8)).images image_grid(images, rows=2, cols=4) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png"> </div> ๊ฝค ์ธ์ƒ์ ์ž…๋‹ˆ๋‹ค! `1`์˜ ์‹œ๋“œ๋ฅผ ๊ฐ€์ง„ `Generator`์— ํ•ด๋‹นํ•˜๋Š” ๋‘ ๋ฒˆ์งธ ์ด๋ฏธ์ง€์— ํ”ผ์‚ฌ์ฒด์˜ ๋‚˜์ด์— ๋Œ€ํ•œ ํ…์ŠคํŠธ๋ฅผ ์ถ”๊ฐ€ํ•˜์—ฌ ์กฐ๊ธˆ ๋” ์กฐ์ •ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค: ```python prompts = [ "portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", "portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta", ] generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))] images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images image_grid(images) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png"> </div> ## ๋‹ค์Œ ๋‹จ๊ณ„ ์ด ํŠœํ† ๋ฆฌ์–ผ์—์„œ๋Š” ๊ณ„์‚ฐ ๋ฐ ๋ฉ”๋ชจ๋ฆฌ ํšจ์œจ์„ ๋†’์ด๊ณ  ์ƒ์„ฑ๋œ ์ถœ๋ ฅ์˜ ํ’ˆ์งˆ์„ ๊ฐœ์„ ํ•˜๊ธฐ ์œ„ํ•ด [`DiffusionPipeline`]์„ ์ตœ์ ํ™”ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ฐฐ์› ์Šต๋‹ˆ๋‹ค. 
ํŒŒ์ดํ”„๋ผ์ธ์„ ๋” ๋น ๋ฅด๊ฒŒ ๋งŒ๋“œ๋Š” ๋ฐ ๊ด€์‹ฌ์ด ์žˆ๋‹ค๋ฉด ๋‹ค์Œ ๋ฆฌ์†Œ์Šค๋ฅผ ์‚ดํŽด๋ณด์„ธ์š”: - [PyTorch 2.0](./optimization/torch2.0) ๋ฐ [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html)์ด ์–ด๋–ป๊ฒŒ ์ถ”๋ก  ์†๋„๋ฅผ 5~300% ํ–ฅ์ƒ์‹œํ‚ฌ ์ˆ˜ ์žˆ๋Š”์ง€ ์•Œ์•„๋ณด์„ธ์š”. A100 GPU์—์„œ๋Š” ์ถ”๋ก  ์†๋„๊ฐ€ ์ตœ๋Œ€ 50%๊นŒ์ง€ ๋นจ๋ผ์งˆ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! - PyTorch 2๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ, [xFormers](./optimization/xformers)๋ฅผ ์„ค์น˜ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ๋ฉ”๋ชจ๋ฆฌ ํšจ์œจ์ ์ธ ์–ดํ…์…˜ ๋ฉ”์ปค๋‹ˆ์ฆ˜์€ PyTorch 1.13.1๊ณผ ํ•จ๊ป˜ ์‚ฌ์šฉํ•˜๋ฉด ์†๋„๊ฐ€ ๋นจ๋ผ์ง€๊ณ  ๋ฉ”๋ชจ๋ฆฌ ์†Œ๋น„๊ฐ€ ์ค„์–ด๋“ญ๋‹ˆ๋‹ค. - ๋ชจ๋ธ ์˜คํ”„๋กœ๋”ฉ๊ณผ ๊ฐ™์€ ๋‹ค๋ฅธ ์ตœ์ ํ™” ๊ธฐ๋ฒ•์€ [์ด ๊ฐ€์ด๋“œ](./optimization/fp16)์—์„œ ๋‹ค๋ฃจ๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค.
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/index.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

<p align="center">
    <br>
    <img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
    <br>
</p>

# Diffusers

🤗 Diffusers is a library of state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstractions](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).

The library has three main components:

- State-of-the-art [diffusion pipelines](api/pipelines/overview) for inference with just a few lines of code.
- Interchangeable [noise schedulers](api/schedulers/overview) for balancing trade-offs between generation speed and quality.
- Pretrained [models](api/models) that can be used as building blocks and combined with schedulers to create your own end-to-end diffusion systems.

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
      <p class="text-gray-700">Learn the fundamental skills you need to start generating outputs, build your own diffusion system, and train a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
      <p class="text-gray-700">Practical guides to help you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize for inference speed, and use different training techniques.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
      ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
      <p class="text-gray-700">Understand why the library was designed the way it was, and learn more about the ethical guidelines and safety implementations for using the library.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models"
      ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
      <p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
    </a>
  </div>
</div>

## Supported pipelines

| Pipeline | Paper/Repository | Tasks |
|---|---|:---:|
| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Text-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Super Resolution Image-to-Image |
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing |
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation Unconditional Image Generation |
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) (implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
hf_public_repos/diffusers/docs/source
hf_public_repos/diffusers/docs/source/ko/quicktour.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Quicktour

Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you've probably seen examples of diffusion-generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models broadly accessible to everyone.

Whether you're a developer or an everyday user, this quicktour introduces 🧨 Diffusers and helps you get up and generating quickly! There are three main components of the library to know about:

* The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
* Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems.
* Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training and how denoised images are generated during inference.

The quicktour shows you how to use the [`DiffusionPipeline`] for inference, and then walks you through how to combine a model and scheduler to replicate what's happening inside the [`DiffusionPipeline`].

<Tip>

The quicktour is a simplified version of the introductory 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goals, design philosophy, and additional details about its core API, check out the notebook!

</Tip>

Before you begin, make sure you have all the necessary libraries installed:

```py
# uncomment to install the necessary libraries in Colab
#!pip install --upgrade diffusers accelerate transformers
```

- [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training.
- [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).

## DiffusionPipeline

The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks.

Take a look at the table below for some supported tasks; the complete list of supported tasks can be found in the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) table.

| **Task**                     | **Description**                                                                                              | **Pipeline**
|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|
| Unconditional Image Generation          | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
| Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) |
| Text-Guided Image-to-Image Translation     | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) |
| Text-Guided Image-Inpainting          | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) |
| Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) |

Start by creating an instance of the [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub. In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image generation.

<Tip warning={true}>

For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) first before running the model. 🧨 Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content.

</Tip>

Load the model with the [`~DiffusionPipeline.from_pretrained`] method:

```python
>>> from diffusers import DiffusionPipeline

>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```

The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and the [`PNDMScheduler`], among other things:

```py
>>> pipeline
StableDiffusionPipeline {
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.13.1",
  ...,
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  ...,
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```

We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters. You can move the generator object to a GPU, just like you would in PyTorch:

```python
>>> pipeline.to("cuda")
```

Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.

```python
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
</div>

Save the image by calling `save`:

```python
>>> image.save("image_of_squirrel_painting.png")
```

### Local pipeline

You can also use the pipeline locally. The only difference is you need to download the weights first:

```bash
!git lfs install
!git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```

Then load the saved weights into the pipeline:

```python
>>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
```

Now you can run the pipeline as in the section above.

### Swapping schedulers

Different schedulers come with different denoising speeds and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is to let you easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method:

```py
>>> from diffusers import EulerDiscreteScheduler

>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
```

Try generating an image with the new scheduler and see if you notice a difference!

In the next section, you'll take a closer look at the components - the model and the scheduler - that make up the [`DiffusionPipeline`] and learn how to use these components to generate an image of a cat.

## Models

Most models take a noisy sample and, at each timestep, predict the *noise residual*, the difference between a less noisy image and the input image (other models learn to predict the previous sample directly, or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)). You can mix and match models to create other diffusion systems.

Models are initiated with the [`~ModelMixin.from_pretrained`] method, which also locally caches the model weights so it is faster the next time you load the model. For the quicktour, you'll load [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images:

```py
>>> from diffusers import UNet2DModel

>>> repo_id = "google/ddpm-cat-256"
>>> model = UNet2DModel.from_pretrained(repo_id)
```

To access the model parameters, call `model.config`:

```py
>>> model.config
```

The model configuration is a 🧊 frozen 🧊 dictionary, which means those parameters can't be changed after the model is created.
์ด๋Š” ์˜๋„์ ์ธ ๊ฒƒ์œผ๋กœ, ์ฒ˜์Œ์— ๋ชจ๋ธ ์•„ํ‚คํ…์ฒ˜๋ฅผ ์ •์˜ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋œ ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ๋™์ผํ•˜๊ฒŒ ์œ ์ง€ํ•˜๋ฉด์„œ ๋‹ค๋ฅธ ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ์ถ”๋ก  ์ค‘์— ์กฐ์ •ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•˜๊ธฐ ์œ„ํ•œ ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๊ฐ€์žฅ ์ค‘์š”ํ•œ ๋งค๊ฐœ๋ณ€์ˆ˜๋“ค์€ ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค: * `sample_size`: ์ž…๋ ฅ ์ƒ˜ํ”Œ์˜ ๋†’์ด ๋ฐ ๋„ˆ๋น„ ์น˜์ˆ˜์ž…๋‹ˆ๋‹ค. * `in_channels`: ์ž…๋ ฅ ์ƒ˜ํ”Œ์˜ ์ž…๋ ฅ ์ฑ„๋„ ์ˆ˜์ž…๋‹ˆ๋‹ค. * `down_block_types` ๋ฐ `up_block_types`: UNet ์•„ํ‚คํ…์ฒ˜๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋˜๋Š” ๋‹ค์šด ๋ฐ ์—…์ƒ˜ํ”Œ๋ง ๋ธ”๋ก์˜ ์œ ํ˜•. * `block_out_channels`: ๋‹ค์šด์ƒ˜ํ”Œ๋ง ๋ธ”๋ก์˜ ์ถœ๋ ฅ ์ฑ„๋„ ์ˆ˜. ์—…์ƒ˜ํ”Œ๋ง ๋ธ”๋ก์˜ ์ž…๋ ฅ ์ฑ„๋„ ์ˆ˜์— ์—ญ์ˆœ์œผ๋กœ ์‚ฌ์šฉ๋˜๊ธฐ๋„ ํ•ฉ๋‹ˆ๋‹ค. * `layers_per_block`: ๊ฐ UNet ๋ธ”๋ก์— ์กด์žฌํ•˜๋Š” ResNet ๋ธ”๋ก์˜ ์ˆ˜์ž…๋‹ˆ๋‹ค. ์ถ”๋ก ์— ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•˜๋ ค๋ฉด ๋žœ๋ค ๊ฐ€์šฐ์‹œ์•ˆ ๋…ธ์ด์ฆˆ๋กœ ์ด๋ฏธ์ง€ ๋ชจ์–‘์„ ๋งŒ๋“ญ๋‹ˆ๋‹ค. ๋ชจ๋ธ์ด ์—ฌ๋Ÿฌ ๊ฐœ์˜ ๋ฌด์ž‘์œ„ ๋…ธ์ด์ฆˆ๋ฅผ ์ˆ˜์‹ ํ•  ์ˆ˜ ์žˆ์œผ๋ฏ€๋กœ 'batch' ์ถ•, ์ž…๋ ฅ ์ฑ„๋„ ์ˆ˜์— ํ•ด๋‹นํ•˜๋Š” 'channel' ์ถ•, ์ด๋ฏธ์ง€์˜ ๋†’์ด์™€ ๋„ˆ๋น„๋ฅผ ๋‚˜ํƒ€๋‚ด๋Š” 'sample_size' ์ถ•์ด ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` ์ถ”๋ก ์„ ์œ„ํ•ด ๋ชจ๋ธ์— ๋…ธ์ด์ฆˆ๊ฐ€ ์žˆ๋Š” ์ด๋ฏธ์ง€์™€ `timestep`์„ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. 'timestep'์€ ์ž…๋ ฅ ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ ์ •๋„๋ฅผ ๋‚˜ํƒ€๋‚ด๋ฉฐ, ์‹œ์ž‘ ๋ถ€๋ถ„์— ๋” ๋งŽ์€ ๋…ธ์ด์ฆˆ๊ฐ€ ์žˆ๊ณ  ๋ ๋ถ€๋ถ„์— ๋” ์ ์€ ๋…ธ์ด์ฆˆ๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ๋ชจ๋ธ์ด diffusion ๊ณผ์ •์—์„œ ์‹œ์ž‘ ๋˜๋Š” ๋์— ๋” ๊ฐ€๊นŒ์šด ์œ„์น˜๋ฅผ ๊ฒฐ์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. `sample` ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ ์ถœ๋ ฅ์„ ์–ป์Šต๋‹ˆ๋‹ค: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` ํ•˜์ง€๋งŒ ์‹ค์ œ ์˜ˆ๋ฅผ ์ƒ์„ฑํ•˜๋ ค๋ฉด ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค๋ฅผ ์•ˆ๋‚ดํ•  ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ๋‹ค์Œ ์„น์…˜์—์„œ๋Š” ๋ชจ๋ธ์„ ์Šค์ผ€์ค„๋Ÿฌ์™€ ๊ฒฐํ•ฉํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•ด ์•Œ์•„๋ด…๋‹ˆ๋‹ค. ## ์Šค์ผ€์ค„๋Ÿฌ ์Šค์ผ€์ค„๋Ÿฌ๋Š” ๋ชจ๋ธ ์ถœ๋ ฅ์ด ์ฃผ์–ด์กŒ์„ ๋•Œ ๋…ธ์ด์ฆˆ๊ฐ€ ๋งŽ์€ ์ƒ˜ํ”Œ์—์„œ ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ์ƒ˜ํ”Œ๋กœ ์ „ํ™˜ํ•˜๋Š” ๊ฒƒ์„ ๊ด€๋ฆฌํ•ฉ๋‹ˆ๋‹ค - ์ด ๊ฒฝ์šฐ 'noisy_residual'. <Tip> ๐Ÿงจ Diffusers๋Š” Diffusion ์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ํ•˜๊ธฐ ์œ„ํ•œ ํˆด๋ฐ•์Šค์ž…๋‹ˆ๋‹ค. [`DiffusionPipeline`]์„ ์‚ฌ์šฉํ•˜๋ฉด ๋ฏธ๋ฆฌ ๋งŒ๋“ค์–ด์ง„ Diffusion ์‹œ์Šคํ…œ์„ ํŽธ๋ฆฌํ•˜๊ฒŒ ์‹œ์ž‘ํ•  ์ˆ˜ ์žˆ์ง€๋งŒ, ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๊ฐœ๋ณ„์ ์œผ๋กœ ์„ ํƒํ•˜์—ฌ ์‚ฌ์šฉ์ž ์ง€์ • Diffusion ์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. </Tip> ํ›‘์–ด๋ณด๊ธฐ์˜ ๊ฒฝ์šฐ, [`~diffusers.ConfigMixin.from_config`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ [`DDPMScheduler`]๋ฅผ ์ธ์Šคํ„ด์Šคํ™”ํ•ฉ๋‹ˆ๋‹ค: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> ๐Ÿ’ก ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ๊ตฌ์„ฑ์—์„œ ์–ด๋–ป๊ฒŒ ์ธ์Šคํ„ด์Šคํ™”๋˜๋Š”์ง€ ์ฃผ๋ชฉํ•˜์„ธ์š”. ๋ชจ๋ธ๊ณผ ๋‹ฌ๋ฆฌ ์Šค์ผ€์ค„๋Ÿฌ์—๋Š” ํ•™์Šต ๊ฐ€๋Šฅํ•œ ๊ฐ€์ค‘์น˜๊ฐ€ ์—†์œผ๋ฉฐ ๋งค๊ฐœ๋ณ€์ˆ˜๋„ ์—†์Šต๋‹ˆ๋‹ค! 
</Tip> ๊ฐ€์žฅ ์ค‘์š”ํ•œ ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค: * `num_train_timesteps`: ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค์˜ ๊ธธ์ด, ์ฆ‰ ๋žœ๋ค ๊ฐ€์šฐ์Šค ๋…ธ์ด์ฆˆ๋ฅผ ๋ฐ์ดํ„ฐ ์ƒ˜ํ”Œ๋กœ ์ฒ˜๋ฆฌํ•˜๋Š” ๋ฐ ํ•„์š”ํ•œ ํƒ€์ž„์Šคํ… ์ˆ˜์ž…๋‹ˆ๋‹ค. * `beta_schedule`: ์ถ”๋ก  ๋ฐ ํ•™์Šต์— ์‚ฌ์šฉํ•  ๋…ธ์ด์ฆˆ ์Šค์ผ€์ค„ ์œ ํ˜•์ž…๋‹ˆ๋‹ค. * `beta_start` ๋ฐ `beta_end`: ๋…ธ์ด์ฆˆ ์Šค์ผ€์ค„์˜ ์‹œ์ž‘ ๋ฐ ์ข…๋ฃŒ ๋…ธ์ด์ฆˆ ๊ฐ’์ž…๋‹ˆ๋‹ค. ๋…ธ์ด์ฆˆ๊ฐ€ ์•ฝ๊ฐ„ ์ ์€ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•˜๋ ค๋ฉด ์Šค์ผ€์ค„๋Ÿฌ์˜ [`~diffusers.DDPMScheduler.step`] ๋ฉ”์„œ๋“œ์— ๋ชจ๋ธ ์ถœ๋ ฅ, `timestep`, ํ˜„์žฌ `sample`์„ ์ „๋‹ฌํ•˜์„ธ์š”. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` `less_noisy_sample`์„ ๋‹ค์Œ `timestep`์œผ๋กœ ๋„˜๊ธฐ๋ฉด ๋…ธ์ด์ฆˆ๊ฐ€ ๋” ์ค„์–ด๋“ญ๋‹ˆ๋‹ค! ์ด์ œ ์ด ๋ชจ๋“  ๊ฒƒ์„ ํ•œ๋ฐ ๋ชจ์•„ ์ „์ฒด ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๊ณผ์ •์„ ์‹œ๊ฐํ™”ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ๋จผ์ € ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ๋œ ์ด๋ฏธ์ง€๋ฅผ ํ›„์ฒ˜๋ฆฌํ•˜์—ฌ `PIL.Image`๋กœ ํ‘œ์‹œํ•˜๋Š” ํ•จ์ˆ˜๋ฅผ ๋งŒ๋“ญ๋‹ˆ๋‹ค: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค์˜ ์†๋„๋ฅผ ๋†’์ด๋ ค๋ฉด ์ž…๋ ฅ๊ณผ ๋ชจ๋ธ์„ GPU๋กœ ์˜ฎ๊ธฐ์„ธ์š”: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` ์ด์ œ ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ์ƒ˜ํ”Œ์˜ ์ž”์ฐจ๋ฅผ ์˜ˆ์ธกํ•˜๊ณ  ์Šค์ผ€์ค„๋Ÿฌ๋กœ ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ์ƒ˜ํ”Œ์„ ๊ณ„์‚ฐํ•˜๋Š” ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` ๊ฐ€๋งŒํžˆ ์•‰์•„์„œ ๊ณ ์–‘์ด๊ฐ€ ์†Œ์Œ์œผ๋กœ๋งŒ ์ƒ์„ฑ๋˜๋Š” ๊ฒƒ์„ ์ง€์ผœ๋ณด์„ธ์š”!๐Ÿ˜ป <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## ๋‹ค์Œ ๋‹จ๊ณ„ ์ด๋ฒˆ ํ›‘์–ด๋ณด๊ธฐ์—์„œ ๐Ÿงจ Diffusers๋กœ ๋ฉ‹์ง„ ์ด๋ฏธ์ง€๋ฅผ ๋งŒ๋“ค์–ด ๋ณด์…จ๊ธฐ๋ฅผ ๋ฐ”๋ž๋‹ˆ๋‹ค! ๋‹ค์Œ ๋‹จ๊ณ„๋กœ ๋„˜์–ด๊ฐ€์„ธ์š”: * [training](./tutorials/basic_training) ํŠœํ† ๋ฆฌ์–ผ์—์„œ ๋ชจ๋ธ์„ ํ•™์Šตํ•˜๊ฑฐ๋‚˜ ํŒŒ์ธํŠœ๋‹ํ•˜์—ฌ ๋‚˜๋งŒ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. * ๋‹ค์–‘ํ•œ ์‚ฌ์šฉ ์‚ฌ๋ก€๋Š” ๊ณต์‹ ๋ฐ ์ปค๋ฎค๋‹ˆํ‹ฐ [ํ•™์Šต ๋˜๋Š” ํŒŒ์ธํŠœ๋‹ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) ์˜ˆ์‹œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. * ์Šค์ผ€์ค„๋Ÿฌ ๋กœ๋“œ, ์•ก์„ธ์Šค, ๋ณ€๊ฒฝ ๋ฐ ๋น„๊ต์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ ์‚ฌ์šฉ](./using-diffusers/schedulers) ๊ฐ€์ด๋“œ์—์„œ ํ™•์ธํ•˜์„ธ์š”. * [Stable Diffusion](./stable_diffusion) ๊ฐ€์ด๋“œ์—์„œ ํ”„๋กฌํ”„ํŠธ ์—”์ง€๋‹ˆ์–ด๋ง, ์†๋„ ๋ฐ ๋ฉ”๋ชจ๋ฆฌ ์ตœ์ ํ™”, ๊ณ ํ’ˆ์งˆ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ์œ„ํ•œ ํŒ๊ณผ ์š”๋ น์„ ์‚ดํŽด๋ณด์„ธ์š”. 
* [GPU์—์„œ ํŒŒ์ดํ† ์น˜ ์ตœ์ ํ™”](./optimization/fp16) ๊ฐ€์ด๋“œ์™€ [์• ํ”Œ ์‹ค๋ฆฌ์ฝ˜(M1/M2)์—์„œ์˜ Stable Diffusion](./optimization/mps) ๋ฐ [ONNX ๋Ÿฐํƒ€์ž„](./optimization/onnx) ์‹คํ–‰์— ๋Œ€ํ•œ ์ถ”๋ก  ๊ฐ€์ด๋“œ๋ฅผ ํ†ตํ•ด ๐Ÿงจ Diffuser ์†๋„๋ฅผ ๋†’์ด๋Š” ๋ฐฉ๋ฒ•์„ ๋” ์ž์„ธํžˆ ์•Œ์•„๋ณด์„ธ์š”.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/tutorials/tutorial_overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Overview

๐Ÿงจ Diffusers์— ์˜ค์‹  ๊ฑธ ํ™˜์˜ํ•ฉ๋‹ˆ๋‹ค! ์—ฌ๋Ÿฌ๋ถ„์ด diffusion ๋ชจ๋ธ๊ณผ ์ƒ์„ฑ AI๋ฅผ ์ฒ˜์Œ ์ ‘ํ•˜๊ณ , ๋” ๋งŽ์€ ๊ฑธ ๋ฐฐ์šฐ๊ณ  ์‹ถ์œผ์…จ๋‹ค๋ฉด ์ œ๋Œ€๋กœ ์ฐพ์•„์˜ค์…จ์Šต๋‹ˆ๋‹ค. ์ด ํŠœํ† ๋ฆฌ์–ผ์€ diffusion model์„ ์—ฌ๋Ÿฌ๋ถ„์—๊ฒŒ ์นœ์ ˆํ•˜๊ฒŒ ์†Œ๊ฐœํ•˜๊ณ , ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ ๊ธฐ๋ณธ ์‚ฌํ•ญ(ํ•ต์‹ฌ ๊ตฌ์„ฑ์š”์†Œ์™€ ๐Ÿงจ Diffusers ์‚ฌ์šฉ๋ฒ•)์„ ์ดํ•ดํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋„๋ก ์„ค๊ณ„๋˜์—ˆ์Šต๋‹ˆ๋‹ค.

์—ฌ๋Ÿฌ๋ถ„์€ ์ด ํŠœํ† ๋ฆฌ์–ผ์„ ํ†ตํ•ด ๋น ๋ฅด๊ฒŒ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด์„  ์ถ”๋ก  ํŒŒ์ดํ”„๋ผ์ธ์„ ์–ด๋–ป๊ฒŒ ์‚ฌ์šฉํ•ด์•ผ ํ•˜๋Š”์ง€, ๊ทธ๋ฆฌ๊ณ  ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ modular toolbox์ฒ˜๋Ÿผ ์ด์šฉํ•ด์„œ ์—ฌ๋Ÿฌ๋ถ„๋งŒ์˜ diffusion system์„ ๊ตฌ์ถ•ํ•  ์ˆ˜ ์žˆ๋„๋ก ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถ„ํ•ดํ•˜๋Š” ๋ฒ•์„ ๋ฐฐ์šธ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์Œ ๋‹จ์›์—์„œ๋Š” ์—ฌ๋Ÿฌ๋ถ„์ด ์›ํ•˜๋Š” ๊ฒƒ์„ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด ์ž์‹ ๋งŒ์˜ diffusion model์„ ํ•™์Šตํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ฐฐ์šฐ๊ฒŒ ๋ฉ๋‹ˆ๋‹ค.

ํŠœํ† ๋ฆฌ์–ผ์„ ์™„๋ฃŒํ•œ๋‹ค๋ฉด ์—ฌ๋Ÿฌ๋ถ„์€ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์ง์ ‘ ํƒ์ƒ‰ํ•˜๊ณ , ์ž์‹ ์˜ ํ”„๋กœ์ ํŠธ์™€ ์• ํ”Œ๋ฆฌ์ผ€์ด์…˜์— ์ ์šฉํ•  ์Šคํ‚ฌ๋“ค์„ ์Šต๋“ํ•  ์ˆ˜ ์žˆ์„ ๊ฒ๋‹ˆ๋‹ค.

[Discord](https://discord.com/invite/JfAtkvEtRb)๋‚˜ [ํฌ๋Ÿผ](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) ์ปค๋ฎค๋‹ˆํ‹ฐ์— ์ž์œ ๋กญ๊ฒŒ ์ฐธ์—ฌํ•ด์„œ ๋‹ค๋ฅธ ์‚ฌ์šฉ์ž์™€ ๊ฐœ๋ฐœ์ž๋“ค๊ณผ ๊ต๋ฅ˜ํ•˜๊ณ  ํ˜‘์—…ํ•ด ๋ณด์„ธ์š”!

์ž ์ง€๊ธˆ๋ถ€ํ„ฐ diffusing์„ ์‹œ์ž‘ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค! ๐Ÿงจ
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/tutorials/basic_training.md
๏ปฟ<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Diffusion ๋ชจ๋ธ์„ ํ•™์Šตํ•˜๊ธฐ Unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ์€ ํ•™์Šต์— ์‚ฌ์šฉ๋œ ๋ฐ์ดํ„ฐ์…‹๊ณผ ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” diffusion ๋ชจ๋ธ์—์„œ ์ธ๊ธฐ ์žˆ๋Š” ์–ดํ”Œ๋ฆฌ์ผ€์ด์…˜์ž…๋‹ˆ๋‹ค. ์ผ๋ฐ˜์ ์œผ๋กœ, ๊ฐ€์žฅ ์ข‹์€ ๊ฒฐ๊ณผ๋Š” ํŠน์ • ๋ฐ์ดํ„ฐ์…‹์— ์‚ฌ์ „ ํ›ˆ๋ จ๋œ ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด [ํ—ˆ๋ธŒ](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model)์—์„œ ์ด๋Ÿฌํ•œ ๋งŽ์€ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ฐพ์„ ์ˆ˜ ์žˆ์ง€๋งŒ, ๋งŒ์•ฝ ๋งˆ์Œ์— ๋“œ๋Š” ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ฐพ์ง€ ๋ชปํ–ˆ๋‹ค๋ฉด, ์–ธ์ œ๋“ ์ง€ ์Šค์Šค๋กœ ํ•™์Šตํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ์ด ํŠœํ† ๋ฆฌ์–ผ์€ ๋‚˜๋งŒ์˜ ๐Ÿฆ‹ ๋‚˜๋น„ ๐Ÿฆ‹๋ฅผ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) ๋ฐ์ดํ„ฐ์…‹์˜ ํ•˜์œ„ ์ง‘ํ•ฉ์—์„œ [`UNet2DModel`] ๋ชจ๋ธ์„ ํ•™์Šตํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๊ฐ€๋ฅด์ณ์ค„ ๊ฒƒ์ž…๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก ์ด ํ•™์Šต ํŠœํ† ๋ฆฌ์–ผ์€ [Training with ๐Ÿงจ Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) ๋…ธํŠธ๋ถ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•ฉ๋‹ˆ๋‹ค. Diffusion ๋ชจ๋ธ์˜ ์ž‘๋™ ๋ฐฉ์‹ ๋ฐ ์ž์„ธํ•œ ๋‚ด์šฉ์€ ๋…ธํŠธ๋ถ์„ ํ™•์ธํ•˜์„ธ์š”! </Tip> ์‹œ์ž‘ ์ „์—, ๐Ÿค— Datasets์„ ๋ถˆ๋Ÿฌ์˜ค๊ณ  ์ „์ฒ˜๋ฆฌํ•˜๊ธฐ ์œ„ํ•ด ๋ฐ์ดํ„ฐ์…‹์ด ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ๋‹ค์ˆ˜ GPU์—์„œ ํ•™์Šต์„ ๊ฐ„์†Œํ™”ํ•˜๊ธฐ ์œ„ํ•ด ๐Ÿค— Accelerate ๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”. ๊ทธ ํ›„ ํ•™์Šต ๋ฉ”ํŠธ๋ฆญ์„ ์‹œ๊ฐํ™”ํ•˜๊ธฐ ์œ„ํ•ด [TensorBoard](https://www.tensorflow.org/tensorboard)๋ฅผ ๋˜ํ•œ ์„ค์น˜ํ•˜์„ธ์š”. (๋˜ํ•œ ํ•™์Šต ์ถ”์ ์„ ์œ„ํ•ด [Weights & Biases](https://docs.wandb.ai/)๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.) ```bash !pip install diffusers[training] ``` ์ปค๋ฎค๋‹ˆํ‹ฐ์— ๋ชจ๋ธ์„ ๊ณต์œ ํ•  ๊ฒƒ์„ ๊ถŒ์žฅํ•˜๋ฉฐ, ์ด๋ฅผ ์œ„ํ•ด์„œ Hugging Face ๊ณ„์ •์— ๋กœ๊ทธ์ธ์„ ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. (๊ณ„์ •์ด ์—†๋‹ค๋ฉด [์—ฌ๊ธฐ](https://hf.co/join)์—์„œ ๋งŒ๋“ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.) ๋…ธํŠธ๋ถ์—์„œ ๋กœ๊ทธ์ธํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ ๋ฉ”์‹œ์ง€๊ฐ€ ํ‘œ์‹œ๋˜๋ฉด ํ† ํฐ์„ ์ž…๋ ฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ๋˜๋Š” ํ„ฐ๋ฏธ๋„๋กœ ๋กœ๊ทธ์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash huggingface-cli login ``` ๋ชจ๋ธ ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์ƒ๋‹นํžˆ ํฌ๊ธฐ ๋•Œ๋ฌธ์— [Git-LFS](https://git-lfs.com/)์—์„œ ๋Œ€์šฉ๋Ÿ‰ ํŒŒ์ผ์˜ ๋ฒ„์ „ ๊ด€๋ฆฌ๋ฅผ ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```bash !sudo apt -qq install git-lfs !git config --global credential.helper store ``` ## ํ•™์Šต ๊ตฌ์„ฑ ํŽธ์˜๋ฅผ ์œ„ํ•ด ํ•™์Šต ํŒŒ๋ผ๋ฏธํ„ฐ๋“ค์„ ํฌํ•จํ•œ `TrainingConfig` ํด๋ž˜์Šค๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค (์ž์œ ๋กญ๊ฒŒ ์กฐ์ • ๊ฐ€๋Šฅ): ```py >>> from dataclasses import dataclass >>> @dataclass ... class TrainingConfig: ... image_size = 128 # ์ƒ์„ฑ๋˜๋Š” ์ด๋ฏธ์ง€ ํ•ด์ƒ๋„ ... train_batch_size = 16 ... eval_batch_size = 16 # ํ‰๊ฐ€ ๋™์•ˆ์— ์ƒ˜ํ”Œ๋งํ•  ์ด๋ฏธ์ง€ ์ˆ˜ ... num_epochs = 50 ... gradient_accumulation_steps = 1 ... learning_rate = 1e-4 ... lr_warmup_steps = 500 ... 
save_image_epochs = 10 ... save_model_epochs = 30 ... mixed_precision = "fp16" # `no`๋Š” float32, ์ž๋™ ํ˜ผํ•ฉ ์ •๋ฐ€๋„๋ฅผ ์œ„ํ•œ `fp16` ... output_dir = "ddpm-butterflies-128" # ๋กœ์ปฌ ๋ฐ HF Hub์— ์ €์žฅ๋˜๋Š” ๋ชจ๋ธ๋ช… ... push_to_hub = True # ์ €์žฅ๋œ ๋ชจ๋ธ์„ HF Hub์— ์—…๋กœ๋“œํ• ์ง€ ์—ฌ๋ถ€ ... hub_private_repo = False ... overwrite_output_dir = True # ๋…ธํŠธ๋ถ์„ ๋‹ค์‹œ ์‹คํ–‰ํ•  ๋•Œ ์ด์ „ ๋ชจ๋ธ์— ๋ฎ์–ด์”Œ์šธ์ง€ ... seed = 0 >>> config = TrainingConfig() ``` ## ๋ฐ์ดํ„ฐ์…‹ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ๐Ÿค— Datasets ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์™€ [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) ๋ฐ์ดํ„ฐ์…‹์„ ์‰ฝ๊ฒŒ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```py >>> from datasets import load_dataset >>> config.dataset_name = "huggan/smithsonian_butterflies_subset" >>> dataset = load_dataset(config.dataset_name, split="train") ``` ๐Ÿ’ก[HugGan Community Event](https://huggingface.co/huggan) ์—์„œ ์ถ”๊ฐ€์˜ ๋ฐ์ดํ„ฐ์…‹์„ ์ฐพ๊ฑฐ๋‚˜ ๋กœ์ปฌ์˜ [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder)๋ฅผ ๋งŒ๋“ฆ์œผ๋กœ์จ ๋‚˜๋งŒ์˜ ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. HugGan Community Event ์— ๊ฐ€์ ธ์˜จ ๋ฐ์ดํ„ฐ์…‹์˜ ๊ฒฝ์šฐ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์˜ id๋กœ `config.dataset_name` ์„ ์„ค์ •ํ•˜๊ณ , ๋‚˜๋งŒ์˜ ์ด๋ฏธ์ง€๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ `imagefolder` ๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. ๐Ÿค— Datasets์€ [`~datasets.Image`] ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•ด ์ž๋™์œผ๋กœ ์ด๋ฏธ์ง€ ๋ฐ์ดํ„ฐ๋ฅผ ๋””์ฝ”๋”ฉํ•˜๊ณ  [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html)๋กœ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค. ์ด๋ฅผ ์‹œ๊ฐํ™” ํ•ด๋ณด๋ฉด: ```py >>> import matplotlib.pyplot as plt >>> fig, axs = plt.subplots(1, 4, figsize=(16, 4)) >>> for i, image in enumerate(dataset[:4]["image"]): ... axs[i].imshow(image) ... axs[i].set_axis_off() >>> fig.show() ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png) ์ด๋ฏธ์ง€๋Š” ๋ชจ๋‘ ๋‹ค๋ฅธ ์‚ฌ์ด์ฆˆ์ด๊ธฐ ๋•Œ๋ฌธ์—, ์šฐ์„  ์ „์ฒ˜๋ฆฌ๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค: - `Resize` ๋Š” `config.image_size` ์— ์ •์˜๋œ ์ด๋ฏธ์ง€ ์‚ฌ์ด์ฆˆ๋กœ ๋ณ€๊ฒฝํ•ฉ๋‹ˆ๋‹ค. - `RandomHorizontalFlip` ์€ ๋žœ๋ค์ ์œผ๋กœ ์ด๋ฏธ์ง€๋ฅผ ๋ฏธ๋Ÿฌ๋งํ•˜์—ฌ ๋ฐ์ดํ„ฐ์…‹์„ ๋ณด๊ฐ•ํ•ฉ๋‹ˆ๋‹ค. - `Normalize` ๋Š” ๋ชจ๋ธ์ด ์˜ˆ์ƒํ•˜๋Š” [-1, 1] ๋ฒ”์œ„๋กœ ํ”ฝ์…€ ๊ฐ’์„ ์žฌ์กฐ์ • ํ•˜๋Š”๋ฐ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ```py >>> from torchvision import transforms >>> preprocess = transforms.Compose( ... [ ... transforms.Resize((config.image_size, config.image_size)), ... transforms.RandomHorizontalFlip(), ... transforms.ToTensor(), ... transforms.Normalize([0.5], [0.5]), ... ] ... ) ``` ํ•™์Šต ๋„์ค‘์— `preprocess` ํ•จ์ˆ˜๋ฅผ ์ ์šฉํ•˜๋ ค๋ฉด ๐Ÿค— Datasets์˜ [`~datasets.Dataset.set_transform`] ๋ฐฉ๋ฒ•์ด ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. ```py >>> def transform(examples): ... images = [preprocess(image.convert("RGB")) for image in examples["image"]] ... return {"images": images} >>> dataset.set_transform(transform) ``` ์ด๋ฏธ์ง€์˜ ํฌ๊ธฐ๊ฐ€ ์กฐ์ •๋˜์—ˆ๋Š”์ง€ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•ด ์ด๋ฏธ์ง€๋ฅผ ๋‹ค์‹œ ์‹œ๊ฐํ™”ํ•ด๋ณด์„ธ์š”. ์ด์ œ [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader)์— ๋ฐ์ดํ„ฐ์…‹์„ ํฌํ•จํ•ด ํ•™์Šตํ•  ์ค€๋น„๊ฐ€ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค! ```py >>> import torch >>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) ``` ## UNet2DModel ์ƒ์„ฑํ•˜๊ธฐ ๐Ÿงจ Diffusers์— ์‚ฌ์ „ํ•™์Šต๋œ ๋ชจ๋ธ๋“ค์€ ๋ชจ๋ธ ํด๋ž˜์Šค์—์„œ ์›ํ•˜๋Š” ํŒŒ๋ผ๋ฏธํ„ฐ๋กœ ์‰ฝ๊ฒŒ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
์˜ˆ๋ฅผ ๋“ค์–ด, [`UNet2DModel`]๋ฅผ ์ƒ์„ฑํ•˜๋ ค๋ฉด: ```py >>> from diffusers import UNet2DModel >>> model = UNet2DModel( ... sample_size=config.image_size, # ํƒ€๊ฒŸ ์ด๋ฏธ์ง€ ํ•ด์ƒ๋„ ... in_channels=3, # ์ž…๋ ฅ ์ฑ„๋„ ์ˆ˜, RGB ์ด๋ฏธ์ง€์—์„œ 3 ... out_channels=3, # ์ถœ๋ ฅ ์ฑ„๋„ ์ˆ˜ ... layers_per_block=2, # UNet ๋ธ”๋Ÿญ๋‹น ๋ช‡ ๊ฐœ์˜ ResNet ๋ ˆ์ด์–ด๊ฐ€ ์‚ฌ์šฉ๋˜๋Š”์ง€ ... block_out_channels=(128, 128, 256, 256, 512, 512), # ๊ฐ UNet ๋ธ”๋Ÿญ์„ ์œ„ํ•œ ์ถœ๋ ฅ ์ฑ„๋„ ์ˆ˜ ... down_block_types=( ... "DownBlock2D", # ์ผ๋ฐ˜์ ์ธ ResNet ๋‹ค์šด์ƒ˜ํ”Œ๋ง ๋ธ”๋Ÿญ ... "DownBlock2D", ... "DownBlock2D", ... "DownBlock2D", ... "AttnDownBlock2D", # spatial self-attention์ด ํฌํ•จ๋œ ์ผ๋ฐ˜์ ์ธ ResNet ๋‹ค์šด์ƒ˜ํ”Œ๋ง ๋ธ”๋Ÿญ ... "DownBlock2D", ... ), ... up_block_types=( ... "UpBlock2D", # ์ผ๋ฐ˜์ ์ธ ResNet ์—…์ƒ˜ํ”Œ๋ง ๋ธ”๋Ÿญ ... "AttnUpBlock2D", # spatial self-attention์ด ํฌํ•จ๋œ ์ผ๋ฐ˜์ ์ธ ResNet ์—…์ƒ˜ํ”Œ๋ง ๋ธ”๋Ÿญ ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... ), ... ) ``` ์ƒ˜ํ”Œ์˜ ์ด๋ฏธ์ง€ ํฌ๊ธฐ์™€ ๋ชจ๋ธ ์ถœ๋ ฅ ํฌ๊ธฐ๊ฐ€ ๋งž๋Š”์ง€ ๋น ๋ฅด๊ฒŒ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•œ ์ข‹์€ ์•„์ด๋””์–ด๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค: ```py >>> sample_image = dataset[0]["images"].unsqueeze(0) >>> print("Input shape:", sample_image.shape) Input shape: torch.Size([1, 3, 128, 128]) >>> print("Output shape:", model(sample_image, timestep=0).sample.shape) Output shape: torch.Size([1, 3, 128, 128]) ``` ํ›Œ๋ฅญํ•ด์š”! ๋‹ค์Œ, ์ด๋ฏธ์ง€์— ์•ฝ๊ฐ„์˜ ๋…ธ์ด์ฆˆ๋ฅผ ๋”ํ•˜๊ธฐ ์œ„ํ•ด ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ## ์Šค์ผ€์ค„๋Ÿฌ ์ƒ์„ฑํ•˜๊ธฐ ์Šค์ผ€์ค„๋Ÿฌ๋Š” ๋ชจ๋ธ์„ ํ•™์Šต ๋˜๋Š” ์ถ”๋ก ์— ์‚ฌ์šฉํ•˜๋Š”์ง€์— ๋”ฐ๋ผ ๋‹ค๋ฅด๊ฒŒ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. ์ถ”๋ก ์‹œ์—, ์Šค์ผ€์ค„๋Ÿฌ๋Š” ๋…ธ์ด์ฆˆ๋กœ๋ถ€ํ„ฐ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ํ•™์Šต์‹œ ์Šค์ผ€์ค„๋Ÿฌ๋Š” diffusion ๊ณผ์ •์—์„œ์˜ ํŠน์ • ํฌ์ธํŠธ๋กœ๋ถ€ํ„ฐ ๋ชจ๋ธ์˜ ์ถœ๋ ฅ ๋˜๋Š” ์ƒ˜ํ”Œ์„ ๊ฐ€์ ธ์™€ *๋…ธ์ด์ฆˆ ์Šค์ผ€์ค„* ๊ณผ *์—…๋ฐ์ดํŠธ ๊ทœ์น™*์— ๋”ฐ๋ผ ์ด๋ฏธ์ง€์— ๋…ธ์ด์ฆˆ๋ฅผ ์ ์šฉํ•ฉ๋‹ˆ๋‹ค. `DDPMScheduler`๋ฅผ ๋ณด๋ฉด ์ด์ „์œผ๋กœ๋ถ€ํ„ฐ `sample_image`์— ๋žœ๋คํ•œ ๋…ธ์ด์ฆˆ๋ฅผ ๋”ํ•˜๋Š” `add_noise` ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค: ```py >>> import torch >>> from PIL import Image >>> from diffusers import DDPMScheduler >>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000) >>> noise = torch.randn(sample_image.shape) >>> timesteps = torch.LongTensor([50]) >>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps) >>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0]) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png) ๋ชจ๋ธ์˜ ํ•™์Šต ๋ชฉ์ ์€ ์ด๋ฏธ์ง€์— ๋”ํ•ด์ง„ ๋…ธ์ด์ฆˆ๋ฅผ ์˜ˆ์ธกํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด ๋‹จ๊ณ„์—์„œ ์†์‹ค์€ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ๊ณ„์‚ฐ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py >>> import torch.nn.functional as F >>> noise_pred = model(noisy_image, timesteps).sample >>> loss = F.mse_loss(noise_pred, noise) ``` ## ๋ชจ๋ธ ํ•™์Šตํ•˜๊ธฐ ์ง€๊ธˆ๊นŒ์ง€, ๋ชจ๋ธ ํ•™์Šต์„ ์‹œ์ž‘ํ•˜๊ธฐ ์œ„ํ•ด ๋งŽ์€ ๋ถ€๋ถ„์„ ๊ฐ–์ถ”์—ˆ์œผ๋ฉฐ ์ด์ œ ๋‚จ์€ ๊ฒƒ์€ ๋ชจ๋“  ๊ฒƒ์„ ์กฐํ•ฉํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์šฐ์„  ์˜ตํ‹ฐ๋งˆ์ด์ €(optimizer)์™€ ํ•™์Šต๋ฅ  ์Šค์ผ€์ค„๋Ÿฌ(learning rate scheduler)๊ฐ€ ํ•„์š”ํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค: ```py >>> from diffusers.optimization import get_cosine_schedule_with_warmup >>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) >>> lr_scheduler = get_cosine_schedule_with_warmup( ... optimizer=optimizer, ... num_warmup_steps=config.lr_warmup_steps, ... num_training_steps=(len(train_dataloader) * config.num_epochs), ... 
) ``` ๊ทธ ํ›„, ๋ชจ๋ธ์„ ํ‰๊ฐ€ํ•˜๋Š” ๋ฐฉ๋ฒ•์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ํ‰๊ฐ€๋ฅผ ์œ„ํ•ด, `DDPMPipeline`์„ ์‚ฌ์šฉํ•ด ๋ฐฐ์น˜์˜ ์ด๋ฏธ์ง€ ์ƒ˜ํ”Œ๋“ค์„ ์ƒ์„ฑํ•˜๊ณ  ๊ทธ๋ฆฌ๋“œ ํ˜•ํƒœ๋กœ ์ €์žฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py >>> from diffusers import DDPMPipeline >>> import math >>> import os >>> def make_grid(images, rows, cols): ... w, h = images[0].size ... grid = Image.new("RGB", size=(cols * w, rows * h)) ... for i, image in enumerate(images): ... grid.paste(image, box=(i % cols * w, i // cols * h)) ... return grid >>> def evaluate(config, epoch, pipeline): ... # ๋žœ๋คํ•œ ๋…ธ์ด์ฆˆ๋กœ ๋ถ€ํ„ฐ ์ด๋ฏธ์ง€๋ฅผ ์ถ”์ถœํ•ฉ๋‹ˆ๋‹ค.(์ด๋Š” ์—ญ์ „ํŒŒ diffusion ๊ณผ์ •์ž…๋‹ˆ๋‹ค.) ... # ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ ์ถœ๋ ฅ ํ˜•ํƒœ๋Š” `List[PIL.Image]` ์ž…๋‹ˆ๋‹ค. ... images = pipeline( ... batch_size=config.eval_batch_size, ... generator=torch.manual_seed(config.seed), ... ).images ... # ์ด๋ฏธ์ง€๋“ค์„ ๊ทธ๋ฆฌ๋“œ๋กœ ๋งŒ๋“ค์–ด์ค๋‹ˆ๋‹ค. ... image_grid = make_grid(images, rows=4, cols=4) ... # ์ด๋ฏธ์ง€๋“ค์„ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ... test_dir = os.path.join(config.output_dir, "samples") ... os.makedirs(test_dir, exist_ok=True) ... image_grid.save(f"{test_dir}/{epoch:04d}.png") ``` TensorBoard์— ๋กœ๊น…, ๊ทธ๋ž˜๋””์–ธํŠธ ๋ˆ„์  ๋ฐ ํ˜ผํ•ฉ ์ •๋ฐ€๋„ ํ•™์Šต์„ ์‰ฝ๊ฒŒ ์ˆ˜ํ–‰ํ•˜๊ธฐ ์œ„ํ•ด ๐Ÿค— Accelerate๋ฅผ ํ•™์Šต ๋ฃจํ”„์— ํ•จ๊ป˜ ์•ž์„œ ๋งํ•œ ๋ชจ๋“  ๊ตฌ์„ฑ ์ •๋ณด๋“ค์„ ๋ฌถ์–ด ์ง„ํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ—ˆ๋ธŒ์— ๋ชจ๋ธ์„ ์—…๋กœ๋“œ ํ•˜๊ธฐ ์œ„ํ•ด ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ์ด๋ฆ„ ๋ฐ ์ •๋ณด๋ฅผ ๊ฐ€์ ธ์˜ค๊ธฐ ์œ„ํ•œ ํ•จ์ˆ˜๋ฅผ ์ž‘์„ฑํ•˜๊ณ  ํ—ˆ๋ธŒ์— ์—…๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๐Ÿ’ก์•„๋ž˜์˜ ํ•™์Šต ๋ฃจํ”„๋Š” ์–ด๋ ต๊ณ  ๊ธธ์–ด ๋ณด์ผ ์ˆ˜ ์žˆ์ง€๋งŒ, ๋‚˜์ค‘์— ํ•œ ์ค„์˜ ์ฝ”๋“œ๋กœ ํ•™์Šต์„ ํ•œ๋‹ค๋ฉด ๊ทธ๋งŒํ•œ ๊ฐ€์น˜๊ฐ€ ์žˆ์„ ๊ฒƒ์ž…๋‹ˆ๋‹ค! ๋งŒ์•ฝ ๊ธฐ๋‹ค๋ฆฌ์ง€ ๋ชปํ•˜๊ณ  ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, ์•„๋ž˜ ์ฝ”๋“œ๋ฅผ ์ž์œ ๋กญ๊ฒŒ ๋ถ™์—ฌ๋„ฃ๊ณ  ์ž‘๋™์‹œํ‚ค๋ฉด ๋ฉ๋‹ˆ๋‹ค. ๐Ÿค— ```py >>> from accelerate import Accelerator >>> from huggingface_hub import create_repo, upload_folder >>> from tqdm.auto import tqdm >>> from pathlib import Path >>> import os >>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ... # Initialize accelerator and tensorboard logging ... accelerator = Accelerator( ... mixed_precision=config.mixed_precision, ... gradient_accumulation_steps=config.gradient_accumulation_steps, ... log_with="tensorboard", ... project_dir=os.path.join(config.output_dir, "logs"), ... ) ... if accelerator.is_main_process: ... if config.output_dir is not None: ... os.makedirs(config.output_dir, exist_ok=True) ... if config.push_to_hub: ... repo_id = create_repo( ... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True ... ).repo_id ... accelerator.init_trackers("train_example") ... # ๋ชจ๋“  ๊ฒƒ์ด ์ค€๋น„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ... # ๊ธฐ์–ตํ•ด์•ผ ํ•  ํŠน์ •ํ•œ ์ˆœ์„œ๋Š” ์—†์œผ๋ฉฐ ์ค€๋น„ํ•œ ๋ฐฉ๋ฒ•์— ์ œ๊ณตํ•œ ๊ฒƒ๊ณผ ๋™์ผํ•œ ์ˆœ์„œ๋กœ ๊ฐ์ฒด์˜ ์••์ถ•์„ ํ’€๋ฉด ๋ฉ๋‹ˆ๋‹ค. ... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( ... model, optimizer, train_dataloader, lr_scheduler ... ) ... global_step = 0 ... # ์ด์ œ ๋ชจ๋ธ์„ ํ•™์Šตํ•ฉ๋‹ˆ๋‹ค. ... for epoch in range(config.num_epochs): ... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) ... progress_bar.set_description(f"Epoch {epoch}") ... for step, batch in enumerate(train_dataloader): ... clean_images = batch["images"] ... # ์ด๋ฏธ์ง€์— ๋”ํ•  ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ˜ํ”Œ๋งํ•ฉ๋‹ˆ๋‹ค. ... noise = torch.randn(clean_images.shape, device=clean_images.device) ... 
bs = clean_images.shape[0] ... # ๊ฐ ์ด๋ฏธ์ง€๋ฅผ ์œ„ํ•œ ๋žœ๋คํ•œ ํƒ€์ž„์Šคํ…(timestep)์„ ์ƒ˜ํ”Œ๋งํ•ฉ๋‹ˆ๋‹ค. ... timesteps = torch.randint( ... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device, ... dtype=torch.int64 ... ) ... # ๊ฐ ํƒ€์ž„์Šคํ…์˜ ๋…ธ์ด์ฆˆ ํฌ๊ธฐ์— ๋”ฐ๋ผ ๊นจ๋—ํ•œ ์ด๋ฏธ์ง€์— ๋…ธ์ด์ฆˆ๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค. ... # (์ด๋Š” foward diffusion ๊ณผ์ •์ž…๋‹ˆ๋‹ค.) ... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) ... with accelerator.accumulate(model): ... # ๋…ธ์ด์ฆˆ๋ฅผ ๋ฐ˜๋ณต์ ์œผ๋กœ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ... noise_pred = model(noisy_images, timesteps, return_dict=False)[0] ... loss = F.mse_loss(noise_pred, noise) ... accelerator.backward(loss) ... accelerator.clip_grad_norm_(model.parameters(), 1.0) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} ... progress_bar.set_postfix(**logs) ... accelerator.log(logs, step=global_step) ... global_step += 1 ... # ๊ฐ ์—ํฌํฌ๊ฐ€ ๋๋‚œ ํ›„ evaluate()์™€ ๋ช‡ ๊ฐ€์ง€ ๋ฐ๋ชจ ์ด๋ฏธ์ง€๋ฅผ ์„ ํƒ์ ์œผ๋กœ ์ƒ˜ํ”Œ๋งํ•˜๊ณ  ๋ชจ๋ธ์„ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ... if accelerator.is_main_process: ... pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) ... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: ... evaluate(config, epoch, pipeline) ... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: ... if config.push_to_hub: ... upload_folder( ... repo_id=repo_id, ... folder_path=config.output_dir, ... commit_message=f"Epoch {epoch}", ... ignore_patterns=["step_*", "epoch_*"], ... ) ... else: ... pipeline.save_pretrained(config.output_dir) ``` ํœด, ์ฝ”๋“œ๊ฐ€ ๊ฝค ๋งŽ์•˜๋„ค์š”! ํ•˜์ง€๋งŒ ๐Ÿค— Accelerate์˜ [`~accelerate.notebook_launcher`] ํ•จ์ˆ˜์™€ ํ•™์Šต์„ ์‹œ์ž‘ํ•  ์ค€๋น„๊ฐ€ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ํ•จ์ˆ˜์— ํ•™์Šต ๋ฃจํ”„, ๋ชจ๋“  ํ•™์Šต ์ธ์ˆ˜, ํ•™์Šต์— ์‚ฌ์šฉํ•  ํ”„๋กœ์„ธ์Šค ์ˆ˜(์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ GPU์˜ ์ˆ˜๋ฅผ ๋ณ€๊ฒฝํ•  ์ˆ˜ ์žˆ์Œ)๋ฅผ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค: ```py >>> from accelerate import notebook_launcher >>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) >>> notebook_launcher(train_loop, args, num_processes=1) ``` ํ•œ๋ฒˆ ํ•™์Šต์ด ์™„๋ฃŒ๋˜๋ฉด, diffusion ๋ชจ๋ธ๋กœ ์ƒ์„ฑ๋œ ์ตœ์ข… ๐Ÿฆ‹์ด๋ฏธ์ง€๐Ÿฆ‹๋ฅผ ํ™•์ธํ•ด๋ณด๊ธธ ๋ฐ”๋ž๋‹ˆ๋‹ค! ```py >>> import glob >>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) >>> Image.open(sample_images[-1]) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png) ## ๋‹ค์Œ ๋‹จ๊ณ„ Unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ์€ ํ•™์Šต๋  ์ˆ˜ ์žˆ๋Š” ์ž‘์—… ์ค‘ ํ•˜๋‚˜์˜ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค. ๋‹ค๋ฅธ ์ž‘์—…๊ณผ ํ•™์Šต ๋ฐฉ๋ฒ•์€ [๐Ÿงจ Diffusers ํ•™์Šต ์˜ˆ์‹œ](../training/overview) ํŽ˜์ด์ง€์—์„œ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์Œ์€ ํ•™์Šตํ•  ์ˆ˜ ์žˆ๋Š” ๋ช‡ ๊ฐ€์ง€ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค: - [Textual Inversion](../training/text_inversion), ํŠน์ • ์‹œ๊ฐ์  ๊ฐœ๋…์„ ํ•™์Šต์‹œ์ผœ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€์— ํ†ตํ•ฉ์‹œํ‚ค๋Š” ์•Œ๊ณ ๋ฆฌ์ฆ˜์ž…๋‹ˆ๋‹ค. - [DreamBooth](../training/dreambooth), ์ฃผ์ œ์— ๋Œ€ํ•œ ๋ช‡ ๊ฐ€์ง€ ์ž…๋ ฅ ์ด๋ฏธ์ง€๋“ค์ด ์ฃผ์–ด์ง€๋ฉด ์ฃผ์ œ์— ๋Œ€ํ•œ ๊ฐœ์ธํ™”๋œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•œ ๊ธฐ์ˆ ์ž…๋‹ˆ๋‹ค. - [Guide](../training/text2image) ๋ฐ์ดํ„ฐ์…‹์— Stable Diffusion ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. 
- [Guide](../training/lora) LoRA๋ฅผ ์‚ฌ์šฉํ•ด ๋งค์šฐ ํฐ ๋ชจ๋ธ์„ ๋น ๋ฅด๊ฒŒ ํŒŒ์ธํŠœ๋‹ํ•˜๊ธฐ ์œ„ํ•œ ๋ฉ”๋ชจ๋ฆฌ ํšจ์œจ์ ์ธ ๊ธฐ์ˆ ์ž…๋‹ˆ๋‹ค.
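๋ง๋ถ™์ด์ž๋ฉด, ์ด ํŠœํ† ๋ฆฌ์–ผ์—์„œ ์ €์žฅํ•œ ์ฒดํฌํฌ์ธํŠธ๋Š” ๋‹ค๋ฅธ ์‚ฌ์ „ํ•™์Šต ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ๋™์ผํ•œ ๋ฐฉ์‹์œผ๋กœ ์–ธ์ œ๋“ ์ง€ ๋‹ค์‹œ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” `config.output_dir`(๋˜๋Š” ํ—ˆ๋ธŒ์— ์—…๋กœ๋“œํ•œ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ id)์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋‹ค์‹œ ๋ถˆ๋Ÿฌ์™€ ์ถ”๋ก ํ•˜๋Š” ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค:

```py
>>> from diffusers import DDPMPipeline

>>> # ํ•™์Šต ๋ฃจํ”„๊ฐ€ ์ €์žฅํ•œ ๋กœ์ปฌ ๋””๋ ‰ํ† ๋ฆฌ(config.output_dir)์—์„œ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค.
>>> pipeline = DDPMPipeline.from_pretrained("ddpm-butterflies-128")
>>> image = pipeline().images[0]
>>> image.save("butterfly.png")
```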
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# JAX / Flax์—์„œ์˜ ๐Ÿงจ Stable Diffusion!

[[open-in-colab]]

๐Ÿค— Hugging Face [Diffusers](https://github.com/huggingface/diffusers)๋Š” ๋ฒ„์ „ 0.5.1๋ถ€ํ„ฐ Flax๋ฅผ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค! ์ด๋ฅผ ํ†ตํ•ด Colab, Kaggle, Google Cloud Platform์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒƒ์ฒ˜๋Ÿผ Google TPU์—์„œ ์ดˆ๊ณ ์† ์ถ”๋ก ์ด ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค.

์ด ๋…ธํŠธ๋ถ์€ JAX / Flax๋ฅผ ์‚ฌ์šฉํ•ด ์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. Stable Diffusion์˜ ์ž‘๋™ ๋ฐฉ์‹์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์„ ์›ํ•˜๊ฑฐ๋‚˜ GPU์—์„œ ์‹คํ–‰ํ•˜๋ ค๋ฉด ์ด [๋…ธํŠธ๋ถ](https://huggingface.co/docs/diffusers/stable_diffusion)์„ ์ฐธ์กฐํ•˜์„ธ์š”.

๋จผ์ €, TPU ๋ฐฑ์—”๋“œ๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š”์ง€ ํ™•์ธํ•ฉ๋‹ˆ๋‹ค. Colab์—์„œ ์ด ๋…ธํŠธ๋ถ์„ ์‹คํ–‰ํ•˜๋Š” ๊ฒฝ์šฐ, ๋ฉ”๋‰ด์—์„œ ๋Ÿฐํƒ€์ž„์„ ์„ ํƒํ•˜๊ณ  "๋Ÿฐํƒ€์ž„ ์œ ํ˜• ๋ณ€๊ฒฝ" ์˜ต์…˜์„ ์„ ํƒํ•œ ๋‹ค์Œ, ํ•˜๋“œ์›จ์–ด ๊ฐ€์†๊ธฐ ์„ค์ •์—์„œ TPU๋ฅผ ์„ ํƒํ•ฉ๋‹ˆ๋‹ค.

JAX๋Š” TPU ์ „์šฉ์€ ์•„๋‹ˆ์ง€๋งŒ ๊ฐ TPU ์„œ๋ฒ„์—๋Š” 8๊ฐœ์˜ TPU ๊ฐ€์†๊ธฐ๊ฐ€ ๋ณ‘๋ ฌ๋กœ ์ž‘๋™ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ํ•ด๋‹น ํ•˜๋“œ์›จ์–ด์—์„œ ๋” ๋น›์„ ๋ฐœํ•œ๋‹ค๋Š” ์ ์€ ์•Œ์•„๋‘์„ธ์š”.

## Setup

๋จผ์ € diffusers๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•ฉ๋‹ˆ๋‹ค.

```bash
!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
!pip install diffusers
```

```python
import jax.tools.colab_tpu

jax.tools.colab_tpu.setup_tpu()
import jax
```

```python
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind

print(f"Found {num_devices} JAX devices of type {device_type}.")
assert (
    "TPU" in device_type
), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
```

```python out
Found 8 JAX devices of type Cloud TPU.
```

๊ทธ๋Ÿฐ ๋‹ค์Œ ๋ชจ๋“  dependencies๋ฅผ ๊ฐ€์ ธ์˜ต๋‹ˆ๋‹ค.

```python
import numpy as np
import jax
import jax.numpy as jnp

from pathlib import Path
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image

from huggingface_hub import notebook_login
from diffusers import FlaxStableDiffusionPipeline
```

## ๋ชจ๋ธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ

TPU ์žฅ์น˜๋Š” ํšจ์œจ์ ์ธ half-float ์œ ํ˜•์ธ bfloat16์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. ํ…Œ์ŠคํŠธ์—๋Š” ์ด ์œ ํ˜•์„ ์‚ฌ์šฉํ•˜์ง€๋งŒ ๋Œ€์‹  float32๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ „์ฒด ์ •๋ฐ€๋„(full precision)๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
dtype = jnp.bfloat16
```

Flax๋Š” ํ•จ์ˆ˜ํ˜• ํ”„๋ ˆ์ž„์›Œํฌ์ด๋ฏ€๋กœ ๋ชจ๋ธ์€ ๋ฌด์ƒํƒœ(stateless)ํ˜•์ด๋ฉฐ ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ๋ชจ๋ธ ์™ธ๋ถ€์— ์ €์žฅ๋ฉ๋‹ˆ๋‹ค. ์‚ฌ์ „ํ•™์Šต๋œ Flax ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋ฉด ํŒŒ์ดํ”„๋ผ์ธ ์ž์ฒด์™€ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜(๋˜๋Š” ๋งค๊ฐœ๋ณ€์ˆ˜)๊ฐ€ ๋ชจ๋‘ ๋ฐ˜ํ™˜๋ฉ๋‹ˆ๋‹ค. ์ €ํฌ๋Š” bf16 ๋ฒ„์ „์˜ ๊ฐ€์ค‘์น˜๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ์œผ๋ฏ€๋กœ ์œ ํ˜• ๊ฒฝ๊ณ ๊ฐ€ ํ‘œ์‹œ๋˜์ง€๋งŒ ๋ฌด์‹œํ•ด๋„ ๋ฉ๋‹ˆ๋‹ค.
```python pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=dtype, ) ``` ## ์ถ”๋ก  TPU์—๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ 8๊ฐœ์˜ ๋””๋ฐ”์ด์Šค๊ฐ€ ๋ณ‘๋ ฌ๋กœ ์ž‘๋™ํ•˜๋ฏ€๋กœ ๋ณด์œ ํ•œ ๋””๋ฐ”์ด์Šค ์ˆ˜๋งŒํผ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ณต์ œํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฐ ๋‹ค์Œ ๊ฐ๊ฐ ํ•˜๋‚˜์˜ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ๋‹ด๋‹นํ•˜๋Š” 8๊ฐœ์˜ ๋””๋ฐ”์ด์Šค์—์„œ ํ•œ ๋ฒˆ์— ์ถ”๋ก ์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ํ•˜๋‚˜์˜ ์นฉ์ด ํ•˜๋‚˜์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ๊ฑธ๋ฆฌ๋Š” ์‹œ๊ฐ„๊ณผ ๋™์ผํ•œ ์‹œ๊ฐ„์— 8๊ฐœ์˜ ์ด๋ฏธ์ง€๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ณต์ œํ•˜๊ณ  ๋‚˜๋ฉด ํŒŒ์ดํ”„๋ผ์ธ์˜ `prepare_inputs` ํ•จ์ˆ˜๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ ํ† ํฐํ™”๋œ ํ…์ŠคํŠธ ID๋ฅผ ์–ป์Šต๋‹ˆ๋‹ค. ํ† ํฐํ™”๋œ ํ…์ŠคํŠธ์˜ ๊ธธ์ด๋Š” ๊ธฐ๋ณธ CLIP ํ…์ŠคํŠธ ๋ชจ๋ธ์˜ ๊ตฌ์„ฑ์— ๋”ฐ๋ผ 77ํ† ํฐ์œผ๋กœ ์„ค์ •๋ฉ๋‹ˆ๋‹ค. ```python prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic" prompt = [prompt] * jax.device_count() prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids.shape ``` ```python out (8, 77) ``` ### ๋ณต์‚ฌ(Replication) ๋ฐ ์ •๋ ฌํ™” ๋ชจ๋ธ ๋งค๊ฐœ๋ณ€์ˆ˜์™€ ์ž…๋ ฅ๊ฐ’์€ ์šฐ๋ฆฌ๊ฐ€ ๋ณด์œ ํ•œ 8๊ฐœ์˜ ๋ณ‘๋ ฌ ์žฅ์น˜์— ๋ณต์‚ฌ(Replication)๋˜์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋งค๊ฐœ๋ณ€์ˆ˜ ๋”•์…”๋„ˆ๋ฆฌ๋Š” `flax.jax_utils.replicate`(๋”•์…”๋„ˆ๋ฆฌ๋ฅผ ์ˆœํšŒํ•˜๋ฉฐ ๊ฐ€์ค‘์น˜์˜ ๋ชจ์–‘์„ ๋ณ€๊ฒฝํ•˜์—ฌ 8๋ฒˆ ๋ฐ˜๋ณตํ•˜๋Š” ํ•จ์ˆ˜)๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ณต์‚ฌ๋ฉ๋‹ˆ๋‹ค. ๋ฐฐ์—ด์€ `shard`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ณต์ œ๋ฉ๋‹ˆ๋‹ค. ```python p_params = replicate(params) ``` ```python prompt_ids = shard(prompt_ids) prompt_ids.shape ``` ```python out (8, 1, 77) ``` ์ด shape์€ 8๊ฐœ์˜ ๋””๋ฐ”์ด์Šค ๊ฐ๊ฐ์ด shape `(1, 77)`์˜ jnp ๋ฐฐ์—ด์„ ์ž…๋ ฅ๊ฐ’์œผ๋กœ ๋ฐ›๋Š”๋‹ค๋Š” ์˜๋ฏธ์ž…๋‹ˆ๋‹ค. ์ฆ‰ 1์€ ๋””๋ฐ”์ด์Šค๋‹น batch(๋ฐฐ์น˜) ํฌ๊ธฐ์ž…๋‹ˆ๋‹ค. ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ ์ถฉ๋ถ„ํ•œ TPU์—์„œ๋Š” ํ•œ ๋ฒˆ์— ์—ฌ๋Ÿฌ ์ด๋ฏธ์ง€(์นฉ๋‹น)๋ฅผ ์ƒ์„ฑํ•˜๋ ค๋Š” ๊ฒฝ์šฐ 1๋ณด๋‹ค ํด ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ค€๋น„๊ฐ€ ๊ฑฐ์˜ ์™„๋ฃŒ๋˜์—ˆ์Šต๋‹ˆ๋‹ค! ์ด์ œ ์ƒ์„ฑ ํ•จ์ˆ˜์— ์ „๋‹ฌํ•  ๋‚œ์ˆ˜ ์ƒ์„ฑ๊ธฐ๋งŒ ๋งŒ๋“ค๋ฉด ๋ฉ๋‹ˆ๋‹ค. ์ด๊ฒƒ์€ ๋‚œ์ˆ˜๋ฅผ ๋‹ค๋ฃจ๋Š” ๋ชจ๋“  ํ•จ์ˆ˜์— ๋‚œ์ˆ˜ ์ƒ์„ฑ๊ธฐ๊ฐ€ ์žˆ์–ด์•ผ ํ•œ๋‹ค๋Š”, ๋‚œ์ˆ˜์— ๋Œ€ํ•ด ๋งค์šฐ ์ง„์ง€ํ•˜๊ณ  ๋…๋‹จ์ ์ธ Flax์˜ ํ‘œ์ค€ ์ ˆ์ฐจ์ž…๋‹ˆ๋‹ค. ์ด๋ ‡๊ฒŒ ํ•˜๋ฉด ์—ฌ๋Ÿฌ ๋ถ„์‚ฐ๋œ ๊ธฐ๊ธฐ์—์„œ ํ›ˆ๋ จํ•  ๋•Œ์—๋„ ์žฌํ˜„์„ฑ์ด ๋ณด์žฅ๋ฉ๋‹ˆ๋‹ค. ์•„๋ž˜ ํ—ฌํผ ํ•จ์ˆ˜๋Š” ์‹œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋‚œ์ˆ˜ ์ƒ์„ฑ๊ธฐ๋ฅผ ์ดˆ๊ธฐํ™”ํ•ฉ๋‹ˆ๋‹ค. ๋™์ผํ•œ ์‹œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ํ•œ ์ •ํ™•ํžˆ ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋‚˜์ค‘์— ๋…ธํŠธ๋ถ์—์„œ ๊ฒฐ๊ณผ๋ฅผ ํƒ์ƒ‰ํ•  ๋•Œ์—” ๋‹ค๋ฅธ ์‹œ๋“œ๋ฅผ ์ž์œ ๋กญ๊ฒŒ ์‚ฌ์šฉํ•˜์„ธ์š”. ```python def create_key(seed=0): return jax.random.PRNGKey(seed) ``` rng๋ฅผ ์–ป์€ ๋‹ค์Œ 8๋ฒˆ '๋ถ„ํ• 'ํ•˜์—ฌ ๊ฐ ๋””๋ฐ”์ด์Šค๊ฐ€ ๋‹ค๋ฅธ ์ œ๋„ˆ๋ ˆ์ดํ„ฐ๋ฅผ ์ˆ˜์‹ ํ•˜๋„๋ก ํ•ฉ๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ๊ฐ ๋””๋ฐ”์ด์Šค๋งˆ๋‹ค ๋‹ค๋ฅธ ์ด๋ฏธ์ง€๊ฐ€ ์ƒ์„ฑ๋˜๋ฉฐ ์ „์ฒด ํ”„๋กœ์„ธ์Šค๋ฅผ ์žฌํ˜„ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```python rng = create_key(0) rng = jax.random.split(rng, jax.device_count()) ``` JAX ์ฝ”๋“œ๋Š” ๋งค์šฐ ๋น ๋ฅด๊ฒŒ ์‹คํ–‰๋˜๋Š” ํšจ์œจ์ ์ธ ํ‘œํ˜„์œผ๋กœ ์ปดํŒŒ์ผํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•˜์ง€๋งŒ ํ›„์† ํ˜ธ์ถœ์—์„œ ๋ชจ๋“  ์ž…๋ ฅ์ด ๋™์ผํ•œ ๋ชจ์–‘์„ ๊ฐ–๋„๋ก ํ•ด์•ผ ํ•˜๋ฉฐ, ๊ทธ๋ ‡์ง€ ์•Š์œผ๋ฉด JAX๊ฐ€ ์ฝ”๋“œ๋ฅผ ๋‹ค์‹œ ์ปดํŒŒ์ผํ•ด์•ผ ํ•˜๋ฏ€๋กœ ์ตœ์ ํ™”๋œ ์†๋„๋ฅผ ํ™œ์šฉํ•  ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค. `jit = True`๋ฅผ ์ธ์ˆ˜๋กœ ์ „๋‹ฌํ•˜๋ฉด Flax ํŒŒ์ดํ”„๋ผ์ธ์ด ์ฝ”๋“œ๋ฅผ ์ปดํŒŒ์ผํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ๋ชจ๋ธ์ด ์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ 8๊ฐœ์˜ ๋””๋ฐ”์ด์Šค์—์„œ ๋ณ‘๋ ฌ๋กœ ์‹คํ–‰๋˜๋„๋ก ๋ณด์žฅํ•ฉ๋‹ˆ๋‹ค. 
๋‹ค์Œ ์…€์„ ์ฒ˜์Œ ์‹คํ–‰ํ•˜๋ฉด ์ปดํŒŒ์ผํ•˜๋Š” ๋ฐ ์‹œ๊ฐ„์ด ์˜ค๋ž˜ ๊ฑธ๋ฆฌ์ง€๋งŒ ์ดํ›„ ํ˜ธ์ถœ(์ž…๋ ฅ์ด ๋‹ค๋ฅธ ๊ฒฝ์šฐ์—๋„)์€ ํ›จ์”ฌ ๋นจ๋ผ์ง‘๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, ํ…Œ์ŠคํŠธํ–ˆ์„ ๋•Œ TPU v2-8์—์„œ ์ปดํŒŒ์ผํ•˜๋Š” ๋ฐ 1๋ถ„ ์ด์ƒ ๊ฑธ๋ฆฌ์ง€๋งŒ ์ดํ›„ ์ถ”๋ก  ์‹คํ–‰์—๋Š” ์•ฝ 7์ดˆ๊ฐ€ ๊ฑธ๋ฆฝ๋‹ˆ๋‹ค. ``` %%time images = pipeline(prompt_ids, p_params, rng, jit=True)[0] ``` ```python out CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s Wall time: 1min 29s ``` ๋ฐ˜ํ™˜๋œ ๋ฐฐ์—ด์˜ shape์€ `(8, 1, 512, 512, 3)`์ž…๋‹ˆ๋‹ค. ์ด๋ฅผ ์žฌ๊ตฌ์„ฑํ•˜์—ฌ ๋‘ ๋ฒˆ์งธ ์ฐจ์›์„ ์ œ๊ฑฐํ•˜๊ณ  512 ร— 512 ร— 3์˜ ์ด๋ฏธ์ง€ 8๊ฐœ๋ฅผ ์–ป์€ ๋‹ค์Œ PIL๋กœ ๋ณ€ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ```python images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) ``` ### ์‹œ๊ฐํ™” ์ด๋ฏธ์ง€๋ฅผ ๊ทธ๋ฆฌ๋“œ์— ํ‘œ์‹œํ•˜๋Š” ๋„์šฐ๋ฏธ ํ•จ์ˆ˜๋ฅผ ๋งŒ๋“ค์–ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ```python def image_grid(imgs, rows, cols): w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid ``` ```python image_grid(images, 2, 4) ``` ![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_38_output_0.jpeg) ## ๋‹ค๋ฅธ ํ”„๋กฌํ”„ํŠธ ์‚ฌ์šฉ ๋ชจ๋“  ๋””๋ฐ”์ด์Šค์—์„œ ๋™์ผํ•œ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ณต์ œํ•  ํ•„์š”๋Š” ์—†์Šต๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ 2๊ฐœ๋ฅผ ๊ฐ๊ฐ 4๋ฒˆ์”ฉ ์ƒ์„ฑํ•˜๊ฑฐ๋‚˜ ํ•œ ๋ฒˆ์— 8๊ฐœ์˜ ์„œ๋กœ ๋‹ค๋ฅธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋“ฑ ์›ํ•˜๋Š” ๊ฒƒ์€ ๋ฌด์—‡์ด๋“  ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•œ๋ฒˆ ํ•ด๋ณด์„ธ์š”! ๋จผ์ € ์ž…๋ ฅ ์ค€๋น„ ์ฝ”๋“œ๋ฅผ ํŽธ๋ฆฌํ•œ ํ•จ์ˆ˜๋กœ ๋ฆฌํŒฉํ„ฐ๋งํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค: ```python prompts = [ "Labrador in the style of Hokusai", "Painting of a squirrel skating in New York", "HAL-9000 in the style of Van Gogh", "Times Square under water, with fish and a dolphin swimming around", "Ancient Roman fresco showing a man working on his laptop", "Close-up photograph of young black woman against urban background, high quality, bokeh", "Armchair in the shape of an avocado", "Clown astronaut in space, with Earth in the background", ] ``` ```python prompt_ids = pipeline.prepare_inputs(prompts) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, p_params, rng, jit=True).images images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) image_grid(images, 2, 4) ``` ![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/stable_diffusion_jax_how_to_cell_43_output_0.jpeg) ## ๋ณ‘๋ ฌํ™”(parallelization)๋Š” ์–ด๋–ป๊ฒŒ ์ž‘๋™ํ•˜๋Š”๊ฐ€? ์•ž์„œ `diffusers` Flax ํŒŒ์ดํ”„๋ผ์ธ์ด ๋ชจ๋ธ์„ ์ž๋™์œผ๋กœ ์ปดํŒŒ์ผํ•˜๊ณ  ์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ ๋ชจ๋“  ๊ธฐ๊ธฐ์—์„œ ๋ณ‘๋ ฌ๋กœ ์‹คํ–‰ํ•œ๋‹ค๊ณ  ๋ง์”€๋“œ๋ ธ์Šต๋‹ˆ๋‹ค. ์ด์ œ ๊ทธ ํ”„๋กœ์„ธ์Šค๋ฅผ ๊ฐ„๋žตํ•˜๊ฒŒ ์‚ดํŽด๋ณด๊ณ  ์ž‘๋™ ๋ฐฉ์‹์„ ๋ณด์—ฌ๋“œ๋ฆฌ๊ฒ ์Šต๋‹ˆ๋‹ค. JAX ๋ณ‘๋ ฌํ™”๋Š” ์—ฌ๋Ÿฌ ๊ฐ€์ง€ ๋ฐฉ๋ฒ•์œผ๋กœ ์ˆ˜ํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ€์žฅ ์‰ฌ์šด ๋ฐฉ๋ฒ•์€ jax.pmap ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋‹จ์ผ ํ”„๋กœ๊ทธ๋žจ, ๋‹ค์ค‘ ๋ฐ์ดํ„ฐ(SPMD) ๋ณ‘๋ ฌํ™”๋ฅผ ๋‹ฌ์„ฑํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ฆ‰, ๋™์ผํ•œ ์ฝ”๋“œ์˜ ๋ณต์‚ฌ๋ณธ์„ ๊ฐ๊ฐ ๋‹ค๋ฅธ ๋ฐ์ดํ„ฐ ์ž…๋ ฅ์— ๋Œ€ํ•ด ์—ฌ๋Ÿฌ ๊ฐœ ์‹คํ–‰ํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋” ์ •๊ตํ•œ ์ ‘๊ทผ ๋ฐฉ์‹๋„ ๊ฐ€๋Šฅํ•˜๋ฏ€๋กœ ๊ด€์‹ฌ์ด ์žˆ์œผ์‹œ๋‹ค๋ฉด [JAX ๋ฌธ์„œ](https://jax.readthedocs.io/en/latest/index.html)์™€ [`pjit` ํŽ˜์ด์ง€](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit)์—์„œ ์ด ์ฃผ์ œ๋ฅผ ์‚ดํŽด๋ณด์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค! 
`jax.pmap`์€ ๋‘ ๊ฐ€์ง€ ๊ธฐ๋Šฅ์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค: - `jax.jit()`๋ฅผ ํ˜ธ์ถœํ•œ ๊ฒƒ์ฒ˜๋Ÿผ ์ฝ”๋“œ๋ฅผ ์ปดํŒŒ์ผ(๋˜๋Š” `jit`)ํ•ฉ๋‹ˆ๋‹ค. ์ด ์ž‘์—…์€ `pmap`์„ ํ˜ธ์ถœํ•  ๋•Œ๊ฐ€ ์•„๋‹ˆ๋ผ pmapped ํ•จ์ˆ˜๊ฐ€ ์ฒ˜์Œ ํ˜ธ์ถœ๋  ๋•Œ ์ˆ˜ํ–‰๋ฉ๋‹ˆ๋‹ค. - ์ปดํŒŒ์ผ๋œ ์ฝ”๋“œ๊ฐ€ ์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ ๋ชจ๋“  ๊ธฐ๊ธฐ์—์„œ ๋ณ‘๋ ฌ๋กœ ์‹คํ–‰๋˜๋„๋ก ํ•ฉ๋‹ˆ๋‹ค. ์ž‘๋™ ๋ฐฉ์‹์„ ๋ณด์—ฌ๋“œ๋ฆฌ๊ธฐ ์œ„ํ•ด ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ์‹คํ–‰ํ•˜๋Š” ๋น„๊ณต๊ฐœ ๋ฉ”์„œ๋“œ์ธ ํŒŒ์ดํ”„๋ผ์ธ์˜ `_generate` ๋ฉ”์„œ๋“œ๋ฅผ `pmap`ํ•ฉ๋‹ˆ๋‹ค. ์ด ๋ฉ”์„œ๋“œ๋Š” ํ–ฅํ›„ `Diffusers` ๋ฆด๋ฆฌ์Šค์—์„œ ์ด๋ฆ„์ด ๋ณ€๊ฒฝ๋˜๊ฑฐ๋‚˜ ์ œ๊ฑฐ๋  ์ˆ˜ ์žˆ๋‹ค๋Š” ์ ์— ์œ ์˜ํ•˜์„ธ์š”. ```python p_generate = pmap(pipeline._generate) ``` `pmap`์„ ์‚ฌ์šฉํ•œ ํ›„ ์ค€๋น„๋œ ํ•จ์ˆ˜ `p_generate`๋Š” ๊ฐœ๋…์ ์œผ๋กœ ๋‹ค์Œ์„ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค: * ๊ฐ ์žฅ์น˜์—์„œ ๊ธฐ๋ณธ ํ•จ์ˆ˜ `pipeline._generate`์˜ ๋ณต์‚ฌ๋ณธ์„ ํ˜ธ์ถœํ•ฉ๋‹ˆ๋‹ค. * ๊ฐ ์žฅ์น˜์— ์ž…๋ ฅ ์ธ์ˆ˜์˜ ๋‹ค๋ฅธ ๋ถ€๋ถ„์„ ๋ณด๋ƒ…๋‹ˆ๋‹ค. ์ด๊ฒƒ์ด ๋ฐ”๋กœ ์ƒค๋”ฉ์ด ์‚ฌ์šฉ๋˜๋Š” ์ด์œ ์ž…๋‹ˆ๋‹ค. ์ด ๊ฒฝ์šฐ `prompt_ids`์˜ shape์€ `(8, 1, 77, 768)`์ž…๋‹ˆ๋‹ค. ์ด ๋ฐฐ์—ด์€ 8๊ฐœ๋กœ ๋ถ„ํ• ๋˜๊ณ  `_generate`์˜ ๊ฐ ๋ณต์‚ฌ๋ณธ์€ `(1, 77, 768)`์˜ shape์„ ๊ฐ€์ง„ ์ž…๋ ฅ์„ ๋ฐ›๊ฒŒ ๋ฉ๋‹ˆ๋‹ค. ๋ณ‘๋ ฌ๋กœ ํ˜ธ์ถœ๋œ๋‹ค๋Š” ์‚ฌ์‹ค์„ ์™„์ „ํžˆ ๋ฌด์‹œํ•˜๊ณ  `_generate`๋ฅผ ์ฝ”๋”ฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. batch(๋ฐฐ์น˜) ํฌ๊ธฐ(์ด ์˜ˆ์ œ์—์„œ๋Š” `1`)์™€ ์ฝ”๋“œ์— ์ ํ•ฉํ•œ ์ฐจ์›๋งŒ ์‹ ๊ฒฝ ์“ฐ๋ฉด ๋˜๋ฉฐ, ๋ณ‘๋ ฌ๋กœ ์ž‘๋™ํ•˜๊ธฐ ์œ„ํ•ด ์•„๋ฌด๊ฒƒ๋„ ๋ณ€๊ฒฝํ•  ํ•„์š”๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค. ํŒŒ์ดํ”„๋ผ์ธ ํ˜ธ์ถœ์„ ์‚ฌ์šฉํ•  ๋•Œ์™€ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ, ๋‹ค์Œ ์…€์„ ์ฒ˜์Œ ์‹คํ–‰ํ•  ๋•Œ๋Š” ์‹œ๊ฐ„์ด ๊ฑธ๋ฆฌ์ง€๋งŒ ๊ทธ ์ดํ›„์—๋Š” ํ›จ์”ฌ ๋นจ๋ผ์ง‘๋‹ˆ๋‹ค. ``` %%time images = p_generate(prompt_ids, p_params, rng) images = images.block_until_ready() images.shape ``` ```python out CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s Wall time: 1min 15s ``` ```python images.shape ``` ```python out (8, 1, 512, 512, 3) ``` JAX๋Š” ๋น„๋™๊ธฐ ๋””์ŠคํŒจ์น˜๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ๊ฐ€๋Šฅํ•œ ํ•œ ๋นจ๋ฆฌ ์ œ์–ด๊ถŒ์„ Python ๋ฃจํ”„์— ๋ฐ˜ํ™˜ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ์ถ”๋ก  ์‹œ๊ฐ„์„ ์ •ํ™•ํ•˜๊ฒŒ ์ธก์ •ํ•˜๊ธฐ ์œ„ํ•ด `block_until_ready()`๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์•„์ง ๊ตฌ์ฒดํ™”๋˜์ง€ ์•Š์€ ๊ณ„์‚ฐ ๊ฒฐ๊ณผ๋ฅผ ์‚ฌ์šฉํ•˜๋ ค๋Š” ๊ฒฝ์šฐ ์ž๋™์œผ๋กœ ์ฐจ๋‹จ์ด ์ˆ˜ํ–‰๋˜๋ฏ€๋กœ ์ฝ”๋“œ์—์„œ ์ด ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•  ํ•„์š”๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/unconditional_image_generation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# Unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ

[[open-in-colab]]

Unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ์€ ๋น„๊ต์  ๊ฐ„๋‹จํ•œ ์ž‘์—…์ž…๋‹ˆ๋‹ค. ๋ชจ๋ธ์ด ํ…์ŠคํŠธ๋‚˜ ์ด๋ฏธ์ง€์™€ ๊ฐ™์€ ์ถ”๊ฐ€ ์กฐ๊ฑด ์—†์ด, ํ•™์Šต์— ์‚ฌ์šฉ๋œ ๋ฐ์ดํ„ฐ์™€ ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€๋งŒ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.

[`DiffusionPipeline`]์€ ์ถ”๋ก ์„ ์œ„ํ•ด ๋ฏธ๋ฆฌ ํ•™์Šต๋œ diffusion ์‹œ์Šคํ…œ์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฐ€์žฅ ์‰ฌ์šด ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค.

๋จผ์ € [`DiffusionPipeline`]์˜ ์ธ์Šคํ„ด์Šค๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ๋‹ค์šด๋กœ๋“œํ•  ํŒŒ์ดํ”„๋ผ์ธ์˜ [์ฒดํฌํฌ์ธํŠธ](https://huggingface.co/models?library=diffusers&sort=downloads)๋ฅผ ์ง€์ •ํ•ฉ๋‹ˆ๋‹ค. ํ—ˆ๋ธŒ์˜ ๐Ÿงจ diffusion ์ฒดํฌํฌ์ธํŠธ ์ค‘ ํ•˜๋‚˜๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค(์‚ฌ์šฉํ•  ์ฒดํฌํฌ์ธํŠธ๋Š” ๋‚˜๋น„ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค).

<Tip>

๐Ÿ’ก ๋‚˜๋งŒ์˜ unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ ๋ชจ๋ธ์„ ํ•™์Šต์‹œํ‚ค๊ณ  ์‹ถ์œผ์‹ ๊ฐ€์š”? ํ•™์Šต ๊ฐ€์ด๋“œ๋ฅผ ์‚ดํŽด๋ณด๊ณ  ๋‚˜๋งŒ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์•Œ์•„๋ณด์„ธ์š”.

</Tip>

์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ์— [`DiffusionPipeline`]๊ณผ [DDPM](https://arxiv.org/abs/2006.11239)์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค:

```python
>>> from diffusers import DiffusionPipeline

>>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128")
```

[`DiffusionPipeline`]์€ ๋ชจ๋“  ๋ชจ๋ธ๋ง, ํ† ํฐํ™”, ์Šค์ผ€์ค„๋ง ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜๊ณ  ์บ์‹œํ•ฉ๋‹ˆ๋‹ค. ๋น ๋ฅธ ์ƒ์„ฑ์„ ์œ„ํ•ด GPU์—์„œ ์‹คํ–‰ํ•  ๊ฒƒ์„ ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. PyTorch์—์„œ์™€ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ ์ œ๋„ˆ๋ ˆ์ดํ„ฐ ๊ฐ์ฒด๋ฅผ GPU๋กœ ์˜ฎ๊ธธ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> generator.to("cuda")
```

์ด์ œ ์ œ๋„ˆ๋ ˆ์ดํ„ฐ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> image = generator().images[0]
```

์ถœ๋ ฅ์€ ๊ธฐ๋ณธ์ ์œผ๋กœ [PIL.Image](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) ๊ฐ์ฒด๋กœ ๊ฐ์‹ธ์ง‘๋‹ˆ๋‹ค.

๋‹ค์Œ์„ ํ˜ธ์ถœํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ €์žฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> image.save("generated_image.png")
```

์•„๋ž˜ ์ŠคํŽ˜์ด์Šค(๋ฐ๋ชจ ๋งํฌ)๋ฅผ ์ด์šฉํ•ด ๋ณด๊ณ , ์ถ”๋ก  ๋‹จ๊ณ„์˜ ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ์ž์œ ๋กญ๊ฒŒ ์กฐ์ ˆํ•˜์—ฌ ์ด๋ฏธ์ง€ ํ’ˆ์งˆ์— ์–ด๋–ค ์˜ํ–ฅ์„ ๋ฏธ์น˜๋Š”์ง€ ํ™•์ธํ•ด ๋ณด์„ธ์š”!

<iframe src="https://stevhliu-ddpm-butterflies-128.hf.space" frameborder="0" width="850" height="500"></iframe>
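๋กœ์ปฌ์—์„œ๋„ ๊ฐ™์€ ์‹คํ—˜์„ ํ•ด๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. DDPM ๊ณ„์—ด ํŒŒ์ดํ”„๋ผ์ธ์€ ํ˜ธ์ถœ ์‹œ `num_inference_steps` ์ธ์ˆ˜๋ฅผ ๋ฐ›์œผ๋ฏ€๋กœ, ๋‹จ๊ณ„ ์ˆ˜๋ฅผ ์ค„์ด๋ฉด ์ƒ์„ฑ์€ ๋นจ๋ผ์ง€์ง€๋งŒ ํ’ˆ์งˆ์€ ๋‚ฎ์•„์งˆ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๋ณด์—ฌ์ฃผ๋Š” ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค(๋‹จ๊ณ„ ์ˆ˜ 100์€ ์˜ˆ์‹œ ๊ฐ’์ž…๋‹ˆ๋‹ค):

```python
>>> # ์ถ”๋ก  ๋‹จ๊ณ„ ์ˆ˜๋ฅผ ์ค„์—ฌ ๋” ๋น ๋ฅด๊ฒŒ ์ƒ˜ํ”Œ๋งํ•ฉ๋‹ˆ๋‹ค.
>>> image = generator(num_inference_steps=100).images[0]
>>> image.save("generated_image_100_steps.png")
```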
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/reproducibility.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ํŒŒ์ดํ”„๋ผ์ธ ์ƒ์„ฑํ•˜๊ธฐ

[[open-in-colab]]

์žฌํ˜„์„ฑ์€ ํ…Œ์ŠคํŠธ, ๊ฒฐ๊ณผ ์žฌํ˜„, ๊ทธ๋ฆฌ๊ณ  [์ด๋ฏธ์ง€ ํ€„๋ฆฌํ‹ฐ ๋†’์ด๊ธฐ](reusing_seeds)์—์„œ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ diffusion ๋ชจ๋ธ์—๋Š” ์ผ์ •ํ•œ ๋ฌด์ž‘์œ„์„ฑ์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ๋ฌด์ž‘์œ„์„ฑ์ด ์žˆ์–ด์•ผ ๋ชจ๋ธ์„ ์‹คํ–‰ํ•  ๋•Œ๋งˆ๋‹ค ํŒŒ์ดํ”„๋ผ์ธ์ด ๋‹ค๋ฅธ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. ํ”Œ๋žซํผ ๊ฐ„์— ์ •ํ™•ํ•˜๊ฒŒ ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜๋Š” ์—†์ง€๋งŒ, ํŠน์ • ํ—ˆ์šฉ ๋ฒ”์œ„ ๋‚ด์—์„œ ๋ฆด๋ฆฌ์Šค ๋ฐ ํ”Œ๋žซํผ ๊ฐ„์— ๊ฒฐ๊ณผ๋ฅผ ์žฌํ˜„ํ•  ์ˆ˜๋Š” ์žˆ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿผ์—๋„ diffusion ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ์ฒดํฌํฌ์ธํŠธ์— ๋”ฐ๋ผ ํ—ˆ์šฉ ์˜ค์ฐจ๊ฐ€ ๋‹ฌ๋ผ์ง‘๋‹ˆ๋‹ค.

diffusion ๋ชจ๋ธ์—์„œ ๋ฌด์ž‘์œ„์„ฑ์˜ ์›์ฒœ์„ ์ œ์–ดํ•˜๊ฑฐ๋‚˜ ๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ดํ•ดํ•˜๋Š” ๊ฒƒ์ด ์ค‘์š”ํ•œ ์ด์œ ์ž…๋‹ˆ๋‹ค.

<Tip>

๐Ÿ’ก Pytorch์˜ [์žฌํ˜„์„ฑ์— ๋Œ€ํ•œ ์„ ์–ธ](https://pytorch.org/docs/stable/notes/randomness.html)์„ ๊ผญ ์ฝ์–ด๋ณด๊ธธ ์ถ”์ฒœํ•ฉ๋‹ˆ๋‹ค:

> ์™„์ „ํ•˜๊ฒŒ ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ๊ฒฐ๊ณผ๋Š” Pytorch ๋ฐฐํฌ, ๊ฐœ๋ณ„์ ์ธ ์ปค๋ฐ‹, ํ˜น์€ ๋‹ค๋ฅธ ํ”Œ๋žซํผ๋“ค์—์„œ ๋ณด์žฅ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค.
> ๋˜ํ•œ, ๊ฒฐ๊ณผ๋Š” CPU์™€ GPU ์‹คํ–‰ ๊ฐ„์— ์‹ฌ์ง€์–ด ๊ฐ™์€ seed๋ฅผ ์‚ฌ์šฉํ•  ๋•Œ๋„ ์žฌํ˜„ ๊ฐ€๋Šฅํ•˜์ง€ ์•Š์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

</Tip>

## ๋ฌด์ž‘์œ„์„ฑ ์ œ์–ดํ•˜๊ธฐ

์ถ”๋ก ์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์€ ๋…ธ์ด์ฆˆ๋ฅผ ์ค„์ด๊ธฐ ์œ„ํ•ด ๊ฐ€์šฐ์‹œ์•ˆ ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•˜๊ฑฐ๋‚˜ ์Šค์ผ€์ค„๋ง ๋‹จ๊ณ„์— ๋…ธ์ด์ฆˆ๋ฅผ ๋”ํ•˜๋Š” ๋“ฑ ๋žœ๋ค ์ƒ˜ํ”Œ๋ง์— ํฌ๊ฒŒ ์˜์กดํ•ฉ๋‹ˆ๋‹ค. [DDIMPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/ddim#diffusers.DDIMPipeline)์—์„œ ๋‘ ์ถ”๋ก  ๋‹จ๊ณ„ ์ดํ›„์˜ ํ…์„œ ๊ฐ’์„ ์‚ดํŽด๋ณด์„ธ์š”:

```python
from diffusers import DDIMPipeline
import numpy as np

model_id = "google/ddpm-cifar10-32"

# ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ
ddim = DDIMPipeline.from_pretrained(model_id)

# ๋‘ ๊ฐœ์˜ ๋‹จ๊ณ„์— ๋Œ€ํ•ด์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•˜๊ณ  numpy tensor๋กœ ๊ฐ’์„ ๋ฐ˜ํ™˜ํ•˜๊ธฐ
image = ddim(num_inference_steps=2, output_type="np").images
print(np.abs(image).sum())
```

์œ„์˜ ์ฝ”๋“œ๋ฅผ ์‹คํ–‰ํ•˜๋ฉด ํ•˜๋‚˜์˜ ๊ฐ’์ด ๋‚˜์˜ค์ง€๋งŒ, ๋‹ค์‹œ ์‹คํ–‰ํ•˜๋ฉด ๋‹ค๋ฅธ ๊ฐ’์ด ๋‚˜์˜ต๋‹ˆ๋‹ค. ๋ฌด์Šจ ์ผ์ด ์ผ์–ด๋‚˜๊ณ  ์žˆ๋Š” ๊ฑธ๊นŒ์š”?

ํŒŒ์ดํ”„๋ผ์ธ์ด ์‹คํ–‰๋  ๋•Œ๋งˆ๋‹ค [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html)์€ ๋‹จ๊ณ„์ ์œผ๋กœ ๋…ธ์ด์ฆˆ๊ฐ€ ์ œ๊ฑฐ๋˜๋Š” ๊ฐ€์šฐ์‹œ์•ˆ ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด ๋‹ค๋ฅธ ๋žœ๋ค seed๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.

๊ทธ๋Ÿฌ๋‚˜ ๋™์ผํ•œ ์ด๋ฏธ์ง€๋ฅผ ์•ˆ์ •์ ์œผ๋กœ ์ƒ์„ฑํ•ด์•ผ ํ•˜๋Š” ๊ฒฝ์šฐ, ๋ฐฉ๋ฒ•์€ ํŒŒ์ดํ”„๋ผ์ธ์„ CPU์—์„œ ์‹คํ–‰ํ•˜๋Š”์ง€ GPU์—์„œ ์‹คํ–‰ํ•˜๋Š”์ง€์— ๋”ฐ๋ผ ๋‹ฌ๋ผ์ง‘๋‹ˆ๋‹ค.
### CPU CPU์—์„œ ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ๊ฒฐ๊ณผ๋ฅผ ์ƒ์„ฑํ•˜๋ ค๋ฉด, PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.randn.html)๋กœ seed๋ฅผ ๊ณ ์ •ํ•ฉ๋‹ˆ๋‹ค: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ddim = DDIMPipeline.from_pretrained(model_id) # ์žฌํ˜„์„ฑ์„ ์œ„ํ•ด generator ๋งŒ๋“ค๊ธฐ generator = torch.Generator(device="cpu").manual_seed(0) # ๋‘ ๊ฐœ์˜ ๋‹จ๊ณ„์— ๋Œ€ํ•ด์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•˜๊ณ  numpy tensor๋กœ ๊ฐ’์„ ๋ฐ˜ํ™˜ํ•˜๊ธฐ image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` ์ด์ œ ์œ„์˜ ์ฝ”๋“œ๋ฅผ ์‹คํ–‰ํ•˜๋ฉด seed๋ฅผ ๊ฐ€์ง„ `Generator` ๊ฐ์ฒด๊ฐ€ ํŒŒ์ดํ”„๋ผ์ธ์˜ ๋ชจ๋“  ๋žœ๋ค ํ•จ์ˆ˜์— ์ „๋‹ฌ๋˜๋ฏ€๋กœ ํ•ญ์ƒ `1491.1711` ๊ฐ’์ด ์ถœ๋ ฅ๋ฉ๋‹ˆ๋‹ค. ํŠน์ • ํ•˜๋“œ์›จ์–ด ๋ฐ PyTorch ๋ฒ„์ „์—์„œ ์ด ์ฝ”๋“œ ์˜ˆ์ œ๋ฅผ ์‹คํ–‰ํ•˜๋ฉด ๋™์ผํ•˜์ง€๋Š” ์•Š๋”๋ผ๋„ ์œ ์‚ฌํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก ์ฒ˜์Œ์—๋Š” ์‹œ๋“œ๋ฅผ ๋‚˜ํƒ€๋‚ด๋Š” ์ •์ˆ˜๊ฐ’ ๋Œ€์‹ ์— `Generator` ๊ฐœ์ฒด๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌํ•˜๋Š” ๊ฒƒ์ด ์•ฝ๊ฐ„ ๋น„์ง๊ด€์ ์ผ ์ˆ˜ ์žˆ์ง€๋งŒ, `Generator`๋Š” ์ˆœ์ฐจ์ ์œผ๋กœ ์—ฌ๋Ÿฌ ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌ๋  ์ˆ˜ ์žˆ๋Š” \๋žœ๋ค์ƒํƒœ\์ด๊ธฐ ๋•Œ๋ฌธ์— PyTorch์—์„œ ํ™•๋ฅ ๋ก ์  ๋ชจ๋ธ์„ ๋‹ค๋ฃฐ ๋•Œ ๊ถŒ์žฅ๋˜๋Š” ์„ค๊ณ„์ž…๋‹ˆ๋‹ค. </Tip> ### GPU ์˜ˆ๋ฅผ ๋“ค๋ฉด, GPU ์ƒ์—์„œ ๊ฐ™์€ ์ฝ”๋“œ ์˜ˆ์‹œ๋ฅผ ์‹คํ–‰ํ•˜๋ฉด: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ddim = DDIMPipeline.from_pretrained(model_id) ddim.to("cuda") # ์žฌํ˜„์„ฑ์„ ์œ„ํ•œ generator ๋งŒ๋“ค๊ธฐ generator = torch.Generator(device="cuda").manual_seed(0) # ๋‘ ๊ฐœ์˜ ๋‹จ๊ณ„์— ๋Œ€ํ•ด์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•˜๊ณ  numpy tensor๋กœ ๊ฐ’์„ ๋ฐ˜ํ™˜ํ•˜๊ธฐ image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` GPU๊ฐ€ CPU์™€ ๋‹ค๋ฅธ ๋‚œ์ˆ˜ ์ƒ์„ฑ๊ธฐ๋ฅผ ์‚ฌ์šฉํ•˜๊ธฐ ๋•Œ๋ฌธ์— ๋™์ผํ•œ ์‹œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜๋”๋ผ๋„ ๊ฒฐ๊ณผ๊ฐ€ ๊ฐ™์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์ด ๋ฌธ์ œ๋ฅผ ํ”ผํ•˜๊ธฐ ์œ„ํ•ด ๐Ÿงจ Diffusers๋Š” CPU์— ์ž„์˜์˜ ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•œ ๋‹ค์Œ ํ•„์š”์— ๋”ฐ๋ผ ํ…์„œ๋ฅผ GPU๋กœ ์ด๋™์‹œํ‚ค๋Š” [randn_tensor()](https://huggingface.co/docs/diffusers/v0.18.0/en/api/utilities#diffusers.utils.randn_tensor)๊ธฐ๋Šฅ์„ ๊ฐ€์ง€๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. `randn_tensor` ๊ธฐ๋Šฅ์€ ํŒŒ์ดํ”„๋ผ์ธ ๋‚ด๋ถ€ ์–ด๋””์—์„œ๋‚˜ ์‚ฌ์šฉ๋˜๋ฏ€๋กœ ํŒŒ์ดํ”„๋ผ์ธ์ด GPU์—์„œ ์‹คํ–‰๋˜๋”๋ผ๋„ **ํ•ญ์ƒ** CPU `Generator`๋ฅผ ํ†ต๊ณผํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด์ œ ๊ฒฐ๊ณผ์— ํ›จ์”ฌ ๋” ๋‹ค๊ฐ€์™”์Šต๋‹ˆ๋‹ค! ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ddim = DDIMPipeline.from_pretrained(model_id) ddim.to("cuda") #์žฌํ˜„์„ฑ์„ ์œ„ํ•œ generator ๋งŒ๋“ค๊ธฐ (GPU์— ์˜ฌ๋ฆฌ์ง€ ์•Š๋„๋ก ์กฐ์‹ฌํ•œ๋‹ค!) generator = torch.manual_seed(0) # ๋‘ ๊ฐœ์˜ ๋‹จ๊ณ„์— ๋Œ€ํ•ด์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•˜๊ณ  numpy tensor๋กœ ๊ฐ’์„ ๋ฐ˜ํ™˜ํ•˜๊ธฐ image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` <Tip> ๐Ÿ’ก ์žฌํ˜„์„ฑ์ด ์ค‘์š”ํ•œ ๊ฒฝ์šฐ์—๋Š” ํ•ญ์ƒ CPU generator๋ฅผ ์ „๋‹ฌํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ์„ฑ๋Šฅ ์†์‹ค์€ ๋ฌด์‹œํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ๊ฐ€ ๋งŽ์œผ๋ฉฐ ํŒŒ์ดํ”„๋ผ์ธ์ด GPU์—์„œ ์‹คํ–‰๋˜์—ˆ์„ ๋•Œ๋ณด๋‹ค ํ›จ์”ฌ ๋” ๋น„์Šทํ•œ ๊ฐ’์„ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
</Tip> ๋งˆ์ง€๋ง‰์œผ๋กœ [UnCLIPPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/unclip#diffusers.UnCLIPPipeline)๊ณผ ๊ฐ™์€ ๋” ๋ณต์žกํ•œ ํŒŒ์ดํ”„๋ผ์ธ์˜ ๊ฒฝ์šฐ, ์ด๋“ค์€ ์ข…์ข… ์ •๋ฐ€ ์˜ค์ฐจ ์ „ํŒŒ์— ๊ทน๋„๋กœ ์ทจ์•ฝํ•ฉ๋‹ˆ๋‹ค. ๋‹ค๋ฅธ GPU ํ•˜๋“œ์›จ์–ด ๋˜๋Š” PyTorch ๋ฒ„์ „์—์„œ ์œ ์‚ฌํ•œ ๊ฒฐ๊ณผ๋ฅผ ๊ธฐ๋Œ€ํ•˜์ง€ ๋งˆ์„ธ์š”. ์ด ๊ฒฝ์šฐ ์™„์ „ํ•œ ์žฌํ˜„์„ฑ์„ ์œ„ํ•ด ์™„์ „ํžˆ ๋™์ผํ•œ ํ•˜๋“œ์›จ์–ด ๋ฐ PyTorch ๋ฒ„์ „์„ ์‹คํ–‰ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ## ๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜ ๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•˜์—ฌ ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ƒ์„ฑํ•˜๋„๋ก PyTorch๋ฅผ ๊ตฌ์„ฑํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ ๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜์€ ๋น„๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜๋ณด๋‹ค ๋А๋ฆฌ๊ณ  ์„ฑ๋Šฅ์ด ์ €ํ•˜๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•˜์ง€๋งŒ ์žฌํ˜„์„ฑ์ด ์ค‘์š”ํ•˜๋‹ค๋ฉด, ์ด๊ฒƒ์ด ์ตœ์„ ์˜ ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค! ๋‘˜ ์ด์ƒ์˜ CUDA ์ŠคํŠธ๋ฆผ์—์„œ ์ž‘์—…์ด ์‹œ์ž‘๋  ๋•Œ ๋น„๊ฒฐ์ •๋ก ์  ๋™์ž‘์ด ๋ฐœ์ƒํ•ฉ๋‹ˆ๋‹ค. ์ด ๋ฌธ์ œ๋ฅผ ๋ฐฉ์ง€ํ•˜๋ ค๋ฉด ํ™˜๊ฒฝ ๋ณ€์ˆ˜ [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility)๋ฅผ `:16:8`๋กœ ์„ค์ •ํ•ด์„œ ๋Ÿฐํƒ€์ž„ ์ค‘์— ์˜ค์ง ํ•˜๋‚˜์˜ ๋ฒ„ํผ ํฌ๋ฆฌ๋งŒ ์‚ฌ์šฉํ•˜๋„๋ก ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. PyTorch๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ ๊ฐ€์žฅ ๋น ๋ฅธ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์„ ํƒํ•˜๊ธฐ ์œ„ํ•ด ์—ฌ๋Ÿฌ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ๋ฒค์น˜๋งˆํ‚นํ•ฉ๋‹ˆ๋‹ค. ํ•˜์ง€๋งŒ ์žฌํ˜„์„ฑ์„ ์›ํ•˜๋Š” ๊ฒฝ์šฐ, ๋ฒค์น˜๋งˆํฌ๊ฐ€ ๋งค ์ˆœ๊ฐ„ ๋‹ค๋ฅธ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์„ ํƒํ•  ์ˆ˜ ์žˆ๊ธฐ ๋•Œ๋ฌธ์— ์ด ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•˜์ง€ ์•Š๋„๋ก ์„ค์ •ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋งˆ์ง€๋ง‰์œผ๋กœ, [torch.use_deterministic_algorithms](https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html)์— `True`๋ฅผ ํ†ต๊ณผ์‹œ์ผœ ๊ฒฐ์ •๋ก ์  ์•Œ๊ณ ๋ฆฌ์ฆ˜์ด ํ™œ์„ฑํ™” ๋˜๋„๋ก ํ•ฉ๋‹ˆ๋‹ค. ```py import os os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) ``` ์ด์ œ ๋™์ผํ•œ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋‘๋ฒˆ ์‹คํ–‰ํ•˜๋ฉด ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```py import torch from diffusers import DDIMScheduler, StableDiffusionPipeline import numpy as np model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cuda") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) g = torch.Generator(device="cuda") prompt = "A bear is playing a guitar on Times Square" g.manual_seed(0) result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images g.manual_seed(0) result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images print("L_inf dist = ", abs(result1 - result2).max()) "L_inf dist = tensor(0., device='cuda:0')" ```
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/control_brightness.md
# ์ด๋ฏธ์ง€ ๋ฐ๊ธฐ ์กฐ์ ˆํ•˜๊ธฐ Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์€ [์ผ๋ฐ˜์ ์ธ ๋””ํ“จ์ „ ๋…ธ์ด์ฆˆ ์Šค์ผ€์ค„๊ณผ ์ƒ˜ํ”Œ ๋‹จ๊ณ„์— ๊ฒฐํ•จ์ด ์žˆ์Œ](https://huggingface.co/papers/2305.08891) ๋…ผ๋ฌธ์—์„œ ์„ค๋ช…ํ•œ ๊ฒƒ์ฒ˜๋Ÿผ ๋งค์šฐ ๋ฐ๊ฑฐ๋‚˜ ์–ด๋‘์šด ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ๋Š” ์„ฑ๋Šฅ์ด ํ‰๋ฒ”ํ•ฉ๋‹ˆ๋‹ค. ์ด ๋…ผ๋ฌธ์—์„œ ์ œ์•ˆํ•œ ์†”๋ฃจ์…˜์€ ํ˜„์žฌ [`DDIMScheduler`]์— ๊ตฌํ˜„๋˜์–ด ์žˆ์œผ๋ฉฐ ์ด๋ฏธ์ง€์˜ ๋ฐ๊ธฐ๋ฅผ ๊ฐœ์„ ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก ์ œ์•ˆ๋œ ์†”๋ฃจ์…˜์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ ์œ„์— ๋งํฌ๋œ ๋…ผ๋ฌธ์„ ์ฐธ๊ณ ํ•˜์„ธ์š”! </Tip> ํ•ด๊ฒฐ์ฑ… ์ค‘ ํ•˜๋‚˜๋Š” *v ์˜ˆ์ธก๊ฐ’*๊ณผ *v ๋กœ์Šค*๋กœ ๋ชจ๋ธ์„ ํ›ˆ๋ จํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋‹ค์Œ flag๋ฅผ [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) ๋˜๋Š” [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) ์Šคํฌ๋ฆฝํŠธ์— ์ถ”๊ฐ€ํ•˜์—ฌ `v_prediction`์„ ํ™œ์„ฑํ™”ํ•ฉ๋‹ˆ๋‹ค: ```bash --prediction_type="v_prediction" ``` ์˜ˆ๋ฅผ ๋“ค์–ด, `v_prediction`์œผ๋กœ ๋ฏธ์„ธ ์กฐ์ •๋œ [`ptx0/pseudo-journey-v2`](https://huggingface.co/ptx0/pseudo-journey-v2) ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์‚ฌ์šฉํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ๋‹ค์Œ์œผ๋กœ [`DDIMScheduler`]์—์„œ ๋‹ค์Œ ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค: 1. rescale_betas_zero_snr=True`, ๋…ธ์ด์ฆˆ ์Šค์ผ€์ค„์„ ์ œ๋กœ ํ„ฐ๋ฏธ๋„ ์‹ ํ˜ธ ๋Œ€ ์žก์Œ๋น„(SNR)๋กœ ์žฌ์กฐ์ •ํ•ฉ๋‹ˆ๋‹ค. 2. `timestep_spacing="trailing"`, ๋งˆ์ง€๋ง‰ ํƒ€์ž„์Šคํ…๋ถ€ํ„ฐ ์ƒ˜ํ”Œ๋ง ์‹œ์ž‘ ```py >>> from diffusers import DiffusionPipeline, DDIMScheduler >>> pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2") # switch the scheduler in the pipeline to use the DDIMScheduler >>> pipeline.scheduler = DDIMScheduler.from_config( ... pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing" ... ) >>> pipeline.to("cuda") ``` ๋งˆ์ง€๋ง‰์œผ๋กœ ํŒŒ์ดํ”„๋ผ์ธ์— ๋Œ€ํ•œ ํ˜ธ์ถœ์—์„œ `guidance_rescale`์„ ์„ค์ •ํ•˜์—ฌ ๊ณผ๋‹ค ๋…ธ์ถœ์„ ๋ฐฉ์ง€ํ•ฉ๋‹ˆ๋‹ค: ```py prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipeline(prompt, guidance_rescale=0.7).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero_snr.png"/> </div>
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/weighted_prompts.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ํ”„๋กฌํ”„ํŠธ์— ๊ฐ€์ค‘์น˜ ๋ถ€์—ฌํ•˜๊ธฐ

[[open-in-colab]]

ํ…์ŠคํŠธ ๊ฐ€์ด๋“œ ๊ธฐ๋ฐ˜์˜ diffusion ๋ชจ๋ธ์€ ์ฃผ์–ด์ง„ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์—๋Š” ๋ชจ๋ธ์ด ์ƒ์„ฑํ•ด์•ผ ํ•˜๋Š” ์—ฌ๋Ÿฌ ๊ฐœ๋…์ด ํฌํ•จ๋  ์ˆ˜ ์žˆ์œผ๋ฉฐ, ํ”„๋กฌํ”„ํŠธ์˜ ํŠน์ • ๋ถ€๋ถ„์— ๊ฐ€์ค‘์น˜๋ฅผ ๋ถ€์—ฌํ•˜๋Š” ๊ฒƒ์ด ๋ฐ”๋žŒ์งํ•œ ๊ฒฝ์šฐ๊ฐ€ ๋งŽ์Šต๋‹ˆ๋‹ค.

Diffusion ๋ชจ๋ธ์€ ๋ฌธ๋งฅํ™”๋œ ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ์œผ๋กœ diffusion ๋ชจ๋ธ์˜ cross attention ๋ ˆ์ด์–ด๋ฅผ ์กฐ์ ˆํ•จ์œผ๋กœ์จ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. (์ž์„ธํ•œ ๋‚ด์šฉ์€ [Stable Diffusion Guide](https://huggingface.co/docs/optimum-neuron/main/en/package_reference/modeling#stable-diffusion)๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š”.) ๋”ฐ๋ผ์„œ ํ”„๋กฌํ”„ํŠธ์˜ ํŠน์ • ๋ถ€๋ถ„์„ ๊ฐ•์กฐํ•˜๋Š”(๋˜๋Š” ๊ฐ•์กฐํ•˜์ง€ ์•Š๋Š”) ๊ฐ„๋‹จํ•œ ๋ฐฉ๋ฒ•์€ ํ”„๋กฌํ”„ํŠธ์˜ ๊ด€๋ จ ๋ถ€๋ถ„์— ํ•ด๋‹นํ•˜๋Š” ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ์˜ ํฌ๊ธฐ๋ฅผ ๋Š˜๋ฆฌ๊ฑฐ๋‚˜ ์ค„์ด๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด๊ฒƒ์„ "ํ”„๋กฌํ”„ํŠธ ๊ฐ€์ค‘์น˜ ๋ถ€์—ฌ"๋ผ๊ณ  ํ•˜๋ฉฐ, ์ปค๋ฎค๋‹ˆํ‹ฐ์—์„œ ๊ฐ€์žฅ ๋งŽ์ด ์š”๊ตฌํ•˜๋Š” ๊ธฐ๋Šฅ์ž…๋‹ˆ๋‹ค([์ด๊ณณ](https://github.com/huggingface/diffusers/issues/2431)์˜ issue๋ฅผ ๋ณด์„ธ์š”).

## Diffusers์—์„œ ํ”„๋กฌํ”„ํŠธ ๊ฐ€์ค‘์น˜ ๋ถ€์—ฌํ•˜๋Š” ๋ฐฉ๋ฒ•

์šฐ๋ฆฌ๋Š” `diffusers`์˜ ์—ญํ• ์ด, [InvokeAI](https://github.com/invoke-ai/InvokeAI)๋‚˜ [diffuzers](https://github.com/abhishekkrthakur/diffuzers) ๊ฐ™์€ ๊ฐ•๋ ฅํ•œ UI ๋“ฑ ๋‹ค๋ฅธ ํ”„๋กœ์ ํŠธ๋ฅผ ๊ตฌ์ถ•ํ•  ์ˆ˜ ์žˆ๊ฒŒ ํ•˜๋Š” ํ•„์ˆ˜ ๊ธฐ๋Šฅ์„ ์ œ๊ณตํ•˜๋Š” toolbox๋ผ๊ณ  ์ƒ๊ฐํ•ฉ๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ๋ฅผ ์กฐ์ž‘ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ง€์›ํ•˜๊ธฐ ์œ„ํ•ด, `diffusers`๋Š” [StableDiffusionPipeline](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline)๊ณผ ๊ฐ™์€ ๋งŽ์€ ํŒŒ์ดํ”„๋ผ์ธ์— [prompt_embeds](https://huggingface.co/docs/diffusers/v0.14.0/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) ์ธ์ˆ˜๋ฅผ ๋…ธ์ถœ์‹œ์ผœ, "ํ”„๋กฌํ”„ํŠธ ๊ฐ€์ค‘์น˜๊ฐ€ ๋ถ€์—ฌ๋œ(prompt-weighted)", ์ฆ‰ ์Šค์ผ€์ผ๋œ(scaled) ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ์„ ํŒŒ์ดํ”„๋ผ์ธ์— ๋ฐ”๋กœ ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค.

[Compel ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ](https://github.com/damian0815/compel)๋Š” ํ”„๋กฌํ”„ํŠธ์˜ ์ผ๋ถ€๋ฅผ ๊ฐ•์กฐํ•˜๊ฑฐ๋‚˜ ๊ฐ•์กฐํ•˜์ง€ ์•Š์„ ์ˆ˜ ์žˆ๋Š” ์‰ฌ์šด ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ์ž„๋ฒ ๋”ฉ์„ ์ง์ ‘ ์ค€๋น„ํ•˜๋Š” ๊ฒƒ ๋Œ€์‹  ์ด ๋ฐฉ๋ฒ•์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์„ ๊ฐ•๋ ฅํžˆ ์ถ”์ฒœํ•ฉ๋‹ˆ๋‹ค.

๊ฐ„๋‹จํ•œ ์˜ˆ์ œ๋ฅผ ์‚ดํŽด๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค.
๋‹ค์Œ๊ณผ ๊ฐ™์ด `"๊ณต์„ ๊ฐ–๊ณ  ๋…ธ๋Š” ๋ถ‰์€์ƒ‰ ๊ณ ์–‘์ด"` ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์‹ถ์Šต๋‹ˆ๋‹ค: ```py from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) prompt = "a red cat playing with a ball" generator = torch.Generator(device="cpu").manual_seed(33) image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] image ``` ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€: ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png) ์‚ฌ์ง„์—์„œ ์•Œ ์ˆ˜ ์žˆ๋“ฏ์ด, "๊ณต"์€ ์ด๋ฏธ์ง€์— ์—†์Šต๋‹ˆ๋‹ค. ์ด ๋ถ€๋ถ„์„ ๊ฐ•์กฐํ•ด ๋ณผ๊นŒ์š”! ๋จผ์ € `compel` ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์„ค์น˜ํ•ด์•ผํ•ฉ๋‹ˆ๋‹ค: ``` pip install compel ``` ๊ทธ๋Ÿฐ ๋‹ค์Œ์—๋Š” `Compel` ์˜ค๋ธŒ์ ํŠธ๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค: ```py from compel import Compel compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) ``` ์ด์ œ `"++"` ๋ฅผ ์‚ฌ์šฉํ•ด์„œ "๊ณต" ์„ ๊ฐ•์กฐํ•ด ๋ด…์‹œ๋‹ค: ```py prompt = "a red cat playing with a ball++" ``` ๊ทธ๋ฆฌ๊ณ  ์ด ํ”„๋กฌํ”„ํŠธ๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ์— ๋ฐ”๋กœ ์ „๋‹ฌํ•˜์ง€ ์•Š๊ณ , `compel_proc` ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ฒ˜๋ฆฌํ•ด์•ผํ•ฉ๋‹ˆ๋‹ค: ```py prompt_embeds = compel_proc(prompt) ``` ํŒŒ์ดํ”„๋ผ์ธ์— `prompt_embeds` ๋ฅผ ๋ฐ”๋กœ ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py generator = torch.Generator(device="cpu").manual_seed(33) images = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` ์ด์ œ "๊ณต"์ด ์žˆ๋Š” ๊ทธ๋ฆผ์„ ์ถœ๋ ฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ![img](https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png) ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ `--` ์ ‘๋ฏธ์‚ฌ๋ฅผ ๋‹จ์–ด์— ์‚ฌ์šฉํ•˜์—ฌ ๋ฌธ์žฅ์˜ ์ผ๋ถ€๋ฅผ ๊ฐ•์กฐํ•˜์ง€ ์•Š์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•œ๋ฒˆ ์‹œ๋„ํ•ด ๋ณด์„ธ์š”! ์ฆ๊ฒจ์ฐพ๋Š” ํŒŒ์ดํ”„๋ผ์ธ์— `prompt_embeds` ์ž…๋ ฅ์ด ์—†๋Š” ๊ฒฝ์šฐ issue๋ฅผ ์ƒˆ๋กœ ๋งŒ๋“ค์–ด์ฃผ์„ธ์š”. Diffusers ํŒ€์€ ์ตœ๋Œ€ํ•œ ๋Œ€์‘ํ•˜๋ ค๊ณ  ๋…ธ๋ ฅํ•ฉ๋‹ˆ๋‹ค. Compel 1.1.6 ๋Š” textual inversions์„ ์‚ฌ์šฉํ•˜์—ฌ ๋‹จ์ˆœํ™”ํ•˜๋Š” ์œ ํ‹ฐ๋ฆดํ‹ฐ ํด๋ž˜์Šค๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค. `DiffusersTextualInversionManager`๋ฅผ ์ธ์Šคํ„ด์Šคํ™” ํ•œ ํ›„ ์ด๋ฅผ Compel init์— ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค: ``` textual_inversion_manager = DiffusersTextualInversionManager(pipe) compel = Compel( tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, textual_inversion_manager=textual_inversion_manager) ``` ๋” ๋งŽ์€ ์ •๋ณด๋ฅผ ์–ป๊ณ  ์‹ถ๋‹ค๋ฉด [compel](https://github.com/damian0815/compel) ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ๋ฌธ์„œ๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š”.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/contribute_pipeline.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์— ๊ธฐ์—ฌํ•˜๋Š” ๋ฐฉ๋ฒ• <Tip> ๐Ÿ’ก ๋ชจ๋“  ์‚ฌ๋žŒ์ด ์†๋„ ์ €ํ•˜ ์—†์ด ์‰ฝ๊ฒŒ ์ž‘์—…์„ ๊ณต์œ ํ•  ์ˆ˜ ์žˆ๋„๋ก ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ถ”๊ฐ€ํ•˜๋Š” ์ด์œ ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ GitHub ์ด์Šˆ [#841](https://github.com/huggingface/diffusers/issues/841)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. </Tip> ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜๋ฉด [`DiffusionPipeline`] ์œ„์— ์›ํ•˜๋Š” ์ถ”๊ฐ€ ๊ธฐ๋Šฅ์„ ์ถ”๊ฐ€ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. `DiffusionPipeline` ์œ„์— ๊ตฌ์ถ•ํ•  ๋•Œ์˜ ๊ฐ€์žฅ ํฐ ์žฅ์ ์€ ๋ˆ„๊ตฌ๋‚˜ ์ธ์ˆ˜๋ฅผ ํ•˜๋‚˜๋งŒ ์ถ”๊ฐ€ํ•˜๋ฉด ํŒŒ์ดํ”„๋ผ์ธ์„ ๋กœ๋“œํ•˜๊ณ  ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์–ด ์ปค๋ฎค๋‹ˆํ‹ฐ๊ฐ€ ๋งค์šฐ ์‰ฝ๊ฒŒ ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋‹ค๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด๋ฒˆ ๊ฐ€์ด๋“œ์—์„œ๋Š” ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ƒ์„ฑํ•˜๋Š” ๋ฐฉ๋ฒ•๊ณผ ์ž‘๋™ ์›๋ฆฌ๋ฅผ ์„ค๋ช…ํ•ฉ๋‹ˆ๋‹ค. ๊ฐ„๋‹จํ•˜๊ฒŒ ์„ค๋ช…ํ•˜๊ธฐ ์œ„ํ•ด `UNet`์ด ๋‹จ์ผ forward pass๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ํ•œ ๋ฒˆ ํ˜ธ์ถœํ•˜๋Š” "one-step" ํŒŒ์ดํ”„๋ผ์ธ์„ ๋งŒ๋“ค๊ฒ ์Šต๋‹ˆ๋‹ค. ## ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™” ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์œ„ํ•œ `one_step_unet.py` ํŒŒ์ผ์„ ์ƒ์„ฑํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค. ์ด ํŒŒ์ผ์—์„œ, Hub์—์„œ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜์™€ ์Šค์ผ€์ค„๋Ÿฌ ๊ตฌ์„ฑ์„ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๋„๋ก [`DiffusionPipeline`]์„ ์ƒ์†ํ•˜๋Š” ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. one-step ํŒŒ์ดํ”„๋ผ์ธ์—๋Š” `UNet`๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ํ•„์š”ํ•˜๋ฏ€๋กœ ์ด๋ฅผ `__init__` ํ•จ์ˆ˜์— ์ธ์ˆ˜๋กœ ์ถ”๊ฐ€ํ•ด์•ผํ•ฉ๋‹ˆ๋‹ค: ```python from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() ``` ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ๊ทธ ๊ตฌ์„ฑ์š”์†Œ(`unet` and `scheduler`)๋ฅผ [`~DiffusionPipeline.save_pretrained`]์œผ๋กœ ์ €์žฅํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•˜๋ ค๋ฉด `register_modules` ํ•จ์ˆ˜์— ์ถ”๊ฐ€ํ•˜์„ธ์š”: ```diff from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) ``` ์ด์ œ '์ดˆ๊ธฐํ™”' ๋‹จ๊ณ„๊ฐ€ ์™„๋ฃŒ๋˜์—ˆ์œผ๋‹ˆ forward pass๋กœ ์ด๋™ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ๐Ÿ”ฅ ## Forward pass ์ •์˜ Forward pass ์—์„œ๋Š”(`__call__`๋กœ ์ •์˜ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค) ์›ํ•˜๋Š” ๊ธฐ๋Šฅ์„ ์ถ”๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ์™„์ „ํ•œ ์ฐฝ์ž‘ ์ž์œ ๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. 
์šฐ๋ฆฌ์˜ ๋†€๋ผ์šด one-step ํŒŒ์ดํ”„๋ผ์ธ์˜ ๊ฒฝ์šฐ, ์ž„์˜์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ณ  `timestep=1`์„ ์„ค์ •ํ•˜์—ฌ `unet`๊ณผ `scheduler`๋ฅผ ํ•œ ๋ฒˆ๋งŒ ํ˜ธ์ถœํ•ฉ๋‹ˆ๋‹ค: ```diff from diffusers import DiffusionPipeline import torch class UnetSchedulerOneForwardPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) + def __call__(self): + image = torch.randn( + (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + ) + timestep = 1 + model_output = self.unet(image, timestep).sample + scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample + return scheduler_output ``` ๋๋‚ฌ์Šต๋‹ˆ๋‹ค! ๐Ÿš€ ์ด์ œ ์ด ํŒŒ์ดํ”„๋ผ์ธ์— `unet`๊ณผ `scheduler`๋ฅผ ์ „๋‹ฌํ•˜์—ฌ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python from diffusers import DDPMScheduler, UNet2DModel scheduler = DDPMScheduler() unet = UNet2DModel() pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler) output = pipeline() ``` ํ•˜์ง€๋งŒ ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์กฐ๊ฐ€ ๋™์ผํ•œ ๊ฒฝ์šฐ ๊ธฐ์กด ๊ฐ€์ค‘์น˜๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ์— ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๋‹ค๋Š” ์žฅ์ ์ด ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด one-step ํŒŒ์ดํ”„๋ผ์ธ์— [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) ๊ฐ€์ค‘์น˜๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32") output = pipeline() ``` ## ํŒŒ์ดํ”„๋ผ์ธ ๊ณต์œ  ๐ŸงจDiffusers [๋ฆฌํฌ์ง€ํ† ๋ฆฌ](https://github.com/huggingface/diffusers)์—์„œ Pull Request๋ฅผ ์—ด์–ด [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) ํ•˜์œ„ ํด๋”์— `one_step_unet.py`์˜ ๋ฉ‹์ง„ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ถ”๊ฐ€ํ•˜์„ธ์š”. ๋ณ‘ํ•ฉ์ด ๋˜๋ฉด, `diffusers >= 0.4.0`์ด ์„ค์น˜๋œ ์‚ฌ์šฉ์ž๋ผ๋ฉด ๋ˆ„๊ตฌ๋‚˜ `custom_pipeline` ์ธ์ˆ˜์— ์ง€์ •ํ•˜์—ฌ ์ด ํŒŒ์ดํ”„๋ผ์ธ์„ ๋งˆ์ˆ ์ฒ˜๋Ÿผ ๐Ÿช„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet") pipe() ``` ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ๊ณต์œ ํ•˜๋Š” ๋˜ ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•์€ Hub ์—์„œ ์„ ํ˜ธํ•˜๋Š” [๋ชจ๋ธ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ](https://huggingface.co/docs/hub/models-uploading)์— ์ง์ ‘ `one_step_unet.py` ํŒŒ์ผ์„ ์—…๋กœ๋“œํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. `one_step_unet.py` ํŒŒ์ผ์„ ์ง€์ •ํ•˜๋Š” ๋Œ€์‹  ๋ชจ๋ธ ์ €์žฅ์†Œ id๋ฅผ `custom_pipeline` ์ธ์ˆ˜์— ์ „๋‹ฌํ•˜์„ธ์š”: ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet") ``` ๋‹ค์Œ ํ‘œ์—์„œ ๋‘ ๊ฐ€์ง€ ๊ณต์œ  ์›Œํฌํ”Œ๋กœ์šฐ๋ฅผ ๋น„๊ตํ•˜์—ฌ ์ž์‹ ์—๊ฒŒ ๊ฐ€์žฅ ์ ํ•ฉํ•œ ์˜ต์…˜์„ ๊ฒฐ์ •ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋Š” ์ •๋ณด๋ฅผ ํ™•์ธํ•˜์„ธ์š”: | | GitHub ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ | HF Hub ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ | |----------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | ์‚ฌ์šฉ๋ฒ• | ๋™์ผ | ๋™์ผ | | ๋ฆฌ๋ทฐ ๊ณผ์ • | ๋ณ‘ํ•ฉํ•˜๊ธฐ ์ „์— GitHub์—์„œ Pull Request๋ฅผ ์—ด๊ณ  Diffusers ํŒ€์˜ ๊ฒ€ํ†  ๊ณผ์ •์„ ๊ฑฐ์นฉ๋‹ˆ๋‹ค. ์†๋„๊ฐ€ ๋А๋ฆด ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. | ๊ฒ€ํ†  ์—†์ด Hub ์ €์žฅ์†Œ์— ๋ฐ”๋กœ ์—…๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค. ๊ฐ€์žฅ ๋น ๋ฅธ ์›Œํฌํ”Œ๋กœ์šฐ ์ž…๋‹ˆ๋‹ค. | | ๊ฐ€์‹œ์„ฑ | ๊ณต์‹ Diffusers ์ €์žฅ์†Œ ๋ฐ ๋ฌธ์„œ์— ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. 
| HF ํ—ˆ๋ธŒ ํ”„๋กœํ•„์— ํฌํ•จ๋˜๋ฉฐ, ๊ฐ€์‹œ์„ฑ์„ ํ™•๋ณดํ•˜๊ธฐ ์œ„ํ•ด ์ž์‹ ์˜ ์‚ฌ์šฉ๋Ÿ‰/ํ”„๋กœ๋ชจ์…˜์— ์˜์กดํ•ฉ๋‹ˆ๋‹ค. |

<Tip>

๐Ÿ’ก ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ ํŒŒ์ผ์— ์›ํ•˜๋Š” ํŒจํ‚ค์ง€๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ์ž๊ฐ€ ํŒจํ‚ค์ง€๋ฅผ ์„ค์น˜ํ•˜๊ธฐ๋งŒ ํ•˜๋ฉด ๋ชจ๋“  ๊ฒƒ์ด ์ •์ƒ์ ์œผ๋กœ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. ๋˜ํ•œ ํŒŒ์ดํ”„๋ผ์ธ์ด ์ž๋™์œผ๋กœ ๊ฐ์ง€๋˜๋ฏ€๋กœ, `DiffusionPipeline`์„ ์ƒ์†ํ•˜๋Š” ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๊ฐ€ ํ•˜๋‚˜๋งŒ ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”.

</Tip>

## ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์€ ์–ด๋–ป๊ฒŒ ์ž‘๋™ํ•˜๋‚˜์š”?

์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์€ [`DiffusionPipeline`]์„ ์ƒ์†ํ•˜๋Š” ํด๋ž˜์Šค์ž…๋‹ˆ๋‹ค:

- `custom_pipeline` ์ธ์ˆ˜๋กœ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
- ๋ชจ๋ธ ๊ฐ€์ค‘์น˜ ๋ฐ ์Šค์ผ€์ค„๋Ÿฌ ๊ตฌ์„ฑ์€ `pretrained_model_name_or_path`์—์„œ ๋กœ๋“œ๋ฉ๋‹ˆ๋‹ค.
- ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์—์„œ ๊ธฐ๋Šฅ์„ ๊ตฌํ˜„ํ•˜๋Š” ์ฝ”๋“œ๋Š” `pipeline.py` ํŒŒ์ผ์— ์ •์˜๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.

๊ณต์‹ ์ €์žฅ์†Œ์—์„œ ๋ชจ๋“  ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์„ฑ ์š”์†Œ ๊ฐ€์ค‘์น˜๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ๊ฒฝ์šฐ ๋‹ค๋ฅธ ๊ตฌ์„ฑ ์š”์†Œ๋Š” ํŒŒ์ดํ”„๋ผ์ธ์— ์ง์ ‘ ์ „๋‹ฌํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค:

```python
import torch

from diffusers import DDIMScheduler, DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

model_id = "CompVis/stable-diffusion-v1-4"
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

# ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌํ•  ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ช…์‹œ์ ์œผ๋กœ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค.
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    scheduler=scheduler,
    torch_dtype=torch.float16,
)
```

์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์˜ ๋งˆ๋ฒ•์€ ๋‹ค์Œ ์ฝ”๋“œ์— ๋‹ด๊ฒจ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ์ฝ”๋“œ๋ฅผ ํ†ตํ•ด ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ GitHub ๋˜๋Š” Hub์—์„œ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ, ๋ชจ๋“  ๐Ÿงจ Diffusers ํŒจํ‚ค์ง€์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
# 2. ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๋ฅผ ๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค. ์‚ฌ์šฉ์ž ์ง€์ • ๋ชจ๋“ˆ์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ Hub์—์„œ ๋กœ๋“œํ•˜๊ณ ,
# ๋ช…์‹œ์  ํด๋ž˜์Šค์—์„œ ๋กœ๋“œํ•˜๋Š” ๊ฒฝ์šฐ ํ•ด๋‹น ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.
if custom_pipeline is not None:
    pipeline_class = get_class_from_dynamic_module(
        custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline
    )
elif cls != DiffusionPipeline:
    pipeline_class = cls
else:
    diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
    pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
```
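์ฐธ๊ณ ๋กœ `custom_pipeline` ์ธ์ˆ˜์—๋Š” ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ ์ด๋ฆ„์ด๋‚˜ Hub ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID ์™ธ์—, `pipeline.py` ํŒŒ์ผ์ด ๋“ค์–ด ์žˆ๋Š” ๋กœ์ปฌ ๋””๋ ‰ํ† ๋ฆฌ ๊ฒฝ๋กœ๋„ ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๊ฐ€์ •ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ด๋ฉฐ, `./my_pipeline_dir` ๊ฒฝ๋กœ๋Š” ์„ค๋ช…์„ ์œ„ํ•œ ์ž„์˜์˜ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```python
from diffusers import DiffusionPipeline

# ๊ฐ€์ •: ./my_pipeline_dir/pipeline.py ์•ˆ์— ์ปค์Šคํ…€ ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๊ฐ€ ์ •์˜๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.
pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="./my_pipeline_dir"
)
output = pipeline()
```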
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/depth2img.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-guided depth-to-image ์ƒ์„ฑ [[open-in-colab]] [`StableDiffusionDepth2ImgPipeline`]์„ ์‚ฌ์šฉํ•˜๋ฉด ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์™€ ์ดˆ๊ธฐ ์ด๋ฏธ์ง€๋ฅผ ์ „๋‹ฌํ•˜์—ฌ ์ƒˆ ์ด๋ฏธ์ง€์˜ ์ƒ์„ฑ์„ ์กฐ์ ˆํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ์ด๋ฏธ์ง€ ๊ตฌ์กฐ๋ฅผ ๋ณด์กดํ•˜๊ธฐ ์œ„ํ•ด `depth_map`์„ ์ „๋‹ฌํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. `depth_map`์ด ์ œ๊ณต๋˜์ง€ ์•Š์œผ๋ฉด ํŒŒ์ดํ”„๋ผ์ธ์€ ํ†ตํ•ฉ๋œ [depth-estimation model](https://github.com/isl-org/MiDaS)์„ ํ†ตํ•ด ์ž๋™์œผ๋กœ ๊นŠ์ด๋ฅผ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ๋จผ์ € [`StableDiffusionDepth2ImgPipeline`]์˜ ์ธ์Šคํ„ด์Šค๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค: ```python import torch import requests from PIL import Image from diffusers import StableDiffusionDepth2ImgPipeline pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16, ).to("cuda") ``` ์ด์ œ ํ”„๋กฌํ”„ํŠธ๋ฅผ ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. ํŠน์ • ๋‹จ์–ด๊ฐ€ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ๊ฐ€์ด๋“œ ํ•˜๋Š”๊ฒƒ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•ด `negative_prompt`๋ฅผ ์ „๋‹ฌํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค: ```python url = "http://images.cocodataset.org/val2017/000000039769.jpg" init_image = Image.open(requests.get(url, stream=True).raw) prompt = "two tigers" n_prompt = "bad, deformed, ugly, bad anatomy" image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0] image ``` | Input | Output | |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> | ์•„๋ž˜์˜ Spaces๋ฅผ ๊ฐ€์ง€๊ณ  ๋†€๋ฉฐ depth map์ด ์žˆ๋Š” ์ด๋ฏธ์ง€์™€ ์—†๋Š” ์ด๋ฏธ์ง€์˜ ์ฐจ์ด๊ฐ€ ์žˆ๋Š”์ง€ ํ™•์ธํ•ด ๋ณด์„ธ์š”! <iframe src="https://radames-stable-diffusion-depth2img.hf.space" frameborder="0" width="850" height="500" ></iframe>
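ํŒŒ์ดํ”„๋ผ์ธ์ด ๋‚ด๋ถ€์ ์œผ๋กœ ๊นŠ์ด๋ฅผ ์ถ”์ •ํ•˜๊ฒŒ ํ•˜๋Š” ๋Œ€์‹ , `depth_map` ์ธ์ˆ˜๋กœ ์ง์ ‘ ๊ณ„์‚ฐํ•œ ๊นŠ์ด ๋งต์„ ์ „๋‹ฌํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๊ฐ€์ •ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค. ๐Ÿค— Transformers์˜ `depth-estimation` ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ–ˆ์œผ๋ฉฐ, `predicted_depth` ํ…์„œ๊ฐ€ ํŒŒ์ดํ”„๋ผ์ธ์ด ๊ธฐ๋Œ€ํ•˜๋Š” `(batch, height, width)` ๋ชจ์–‘์ด๋ผ๊ณ  ๊ฐ€์ •ํ•ฉ๋‹ˆ๋‹ค. ๋ฒ„์ „์— ๋”ฐ๋ผ ์ „์ฒ˜๋ฆฌ ๋ฐฉ์‹์ด ๋‹ค๋ฅผ ์ˆ˜ ์žˆ์œผ๋‹ˆ ์ •ํ™•ํ•œ ์‹œ๊ทธ๋‹ˆ์ฒ˜๋Š” API ๋ฌธ์„œ๋ฅผ ํ™•์ธํ•˜์„ธ์š”:

```python
from transformers import pipeline as depth_pipeline

# ๊ฐ€์ •: ์œ„์—์„œ ๋งŒ๋“  pipe, init_image, prompt, n_prompt๋ฅผ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.
depth_estimator = depth_pipeline("depth-estimation")

# predicted_depth๋Š” (batch, height, width) ๋ชจ์–‘์˜ torch ํ…์„œ๋ผ๊ณ  ๊ฐ€์ •ํ•ฉ๋‹ˆ๋‹ค.
depth_map = depth_estimator(init_image)["predicted_depth"]

image = pipe(
    prompt=prompt, image=init_image, depth_map=depth_map, negative_prompt=n_prompt, strength=0.7
).images[0]
```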
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ์กฐ๊ฑด๋ถ€ ์ด๋ฏธ์ง€ ์ƒ์„ฑ

[[open-in-colab]]

์กฐ๊ฑด๋ถ€ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ์‚ฌ์šฉํ•˜๋ฉด ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์—์„œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ…์ŠคํŠธ๋Š” ์ž„๋ฒ ๋”ฉ์œผ๋กœ ๋ณ€ํ™˜๋˜๋ฉฐ, ์ด ์ž„๋ฒ ๋”ฉ์€ ๋…ธ์ด์ฆˆ์—์„œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋„๋ก ๋ชจ๋ธ์„ ์กฐ๊ฑดํ™”ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค.

[`DiffusionPipeline`]์€ ์ถ”๋ก ์„ ์œ„ํ•ด ์‚ฌ์ „ ํ›ˆ๋ จ๋œ diffusion ์‹œ์Šคํ…œ์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฐ€์žฅ ์‰ฌ์šด ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค.

๋จผ์ € [`DiffusionPipeline`]์˜ ์ธ์Šคํ„ด์Šค๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ๋‹ค์šด๋กœ๋“œํ•  ํŒŒ์ดํ”„๋ผ์ธ [์ฒดํฌํฌ์ธํŠธ](https://huggingface.co/models?library=diffusers&sort=downloads)๋ฅผ ์ง€์ •ํ•ฉ๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” [Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256) ์ฒดํฌํฌ์ธํŠธ๋กœ ํ…์ŠคํŠธ-์ด๋ฏธ์ง€ ์ƒ์„ฑ์— [`DiffusionPipeline`]์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค:

```python
>>> from diffusers import DiffusionPipeline

>>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
```

[`DiffusionPipeline`]์€ ๋ชจ๋“  ๋ชจ๋ธ๋ง, ํ† ํฐํ™”, ์Šค์ผ€์ค„๋ง ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜๊ณ  ์บ์‹œํ•ฉ๋‹ˆ๋‹ค. ์ด ๋ชจ๋ธ์€ ์•ฝ 14์–ต ๊ฐœ์˜ ํŒŒ๋ผ๋ฏธํ„ฐ๋กœ ๊ตฌ์„ฑ๋˜์–ด ์žˆ๊ธฐ ๋•Œ๋ฌธ์— GPU์—์„œ ์‹คํ–‰ํ•  ๊ฒƒ์„ ๊ฐ•๋ ฅํžˆ ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. PyTorch์—์„œ์™€ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ ์ƒ์„ฑ๊ธฐ ๊ฐ์ฒด๋ฅผ GPU๋กœ ์ด๋™ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> generator.to("cuda")
```

์ด์ œ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์—์„œ `generator`๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> image = generator("An image of a squirrel in Picasso style").images[0]
```

์ถœ๋ ฅ๊ฐ’์€ ๊ธฐ๋ณธ์ ์œผ๋กœ [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) ๊ฐ์ฒด๋กœ ๋ž˜ํ•‘๋ฉ๋‹ˆ๋‹ค. `save`๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ €์žฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
>>> image.save("image_of_squirrel_painting.png")
```

์•„๋ž˜ ์ŠคํŽ˜์ด์Šค๋ฅผ ์‚ฌ์šฉํ•ด ๋ณด๊ณ , guidance scale ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ์ž์œ ๋กญ๊ฒŒ ์กฐ์ •ํ•˜์—ฌ ์ด๋ฏธ์ง€ ํ’ˆ์งˆ์— ์–ด๋–ค ์˜ํ–ฅ์„ ๋ฏธ์น˜๋Š”์ง€ ํ™•์ธํ•ด ๋ณด์„ธ์š”!

<iframe
	src="https://stabilityai-stable-diffusion.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
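์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป๊ฑฐ๋‚˜ ํ’ˆ์งˆ์„ ์กฐ์ ˆํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, ํ˜ธ์ถœ ์‹œ ์‹œ๋“œ ์ƒ์„ฑ๊ธฐ์™€ ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ํ•จ๊ป˜ ๋„˜๊ธธ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ๊ฐ€์ • ๊ธฐ๋ฐ˜ ์Šค์ผ€์น˜๋กœ, ํŒŒ์ดํ”„๋ผ์ธ ๊ฐ์ฒด ์ด๋ฆ„(`generator`)๊ณผ PyTorch์˜ `torch.Generator`๊ฐ€ ๊ฒน์น˜์ง€ ์•Š๋„๋ก ์‹œ๋“œ ๊ฐ์ฒด๋ฅผ `seed`๋กœ ์ด๋ฆ„ ๋ถ™์˜€์œผ๋ฉฐ, `num_inference_steps`์™€ `guidance_scale` ๊ฐ’์€ ์ž„์˜์˜ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```python
>>> import torch

>>> # ์žฌํ˜„์„ฑ์„ ์œ„ํ•œ ์‹œ๋“œ ๊ณ ์ •
>>> seed = torch.Generator(device="cuda").manual_seed(0)
>>> image = generator(
...     "An image of a squirrel in Picasso style",
...     generator=seed,
...     num_inference_steps=50,
...     guidance_scale=7.5,
... ).images[0]
```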
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/controlling_generation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ์ œ์–ด๋œ ์ƒ์„ฑ Diffusion ๋ชจ๋ธ์— ์˜ํ•ด ์ƒ์„ฑ๋œ ์ถœ๋ ฅ์„ ์ œ์–ดํ•˜๋Š” ๊ฒƒ์€ ์ปค๋ฎค๋‹ˆํ‹ฐ์—์„œ ์˜ค๋žซ๋™์•ˆ ์ถ”๊ตฌํ•ด ์™”์œผ๋ฉฐ ํ˜„์žฌ ํ™œ๋ฐœํ•œ ์—ฐ๊ตฌ ์ฃผ์ œ์ž…๋‹ˆ๋‹ค. ๋„๋ฆฌ ์‚ฌ์šฉ๋˜๋Š” ๋งŽ์€ diffusion ๋ชจ๋ธ์—์„œ๋Š” ์ด๋ฏธ์ง€์™€ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ ๋“ฑ ์ž…๋ ฅ์˜ ๋ฏธ๋ฌ˜ํ•œ ๋ณ€ํ™”๋กœ ์ธํ•ด ์ถœ๋ ฅ์ด ํฌ๊ฒŒ ๋‹ฌ๋ผ์งˆ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด์ƒ์ ์ธ ์„ธ๊ณ„์—์„œ๋Š” ์˜๋ฏธ๊ฐ€ ์œ ์ง€๋˜๊ณ  ๋ณ€๊ฒฝ๋˜๋Š” ๋ฐฉ์‹์„ ์ œ์–ดํ•  ์ˆ˜ ์žˆ๊ธฐ๋ฅผ ์›ํ•ฉ๋‹ˆ๋‹ค. ์˜๋ฏธ ๋ณด์กด์˜ ๋Œ€๋ถ€๋ถ„์˜ ์˜ˆ๋Š” ์ž…๋ ฅ์˜ ๋ณ€ํ™”๋ฅผ ์ถœ๋ ฅ์˜ ๋ณ€ํ™”์— ์ •ํ™•ํ•˜๊ฒŒ ๋งคํ•‘ํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์ถ•์†Œ๋ฉ๋‹ˆ๋‹ค. ์ฆ‰, ํ”„๋กฌํ”„ํŠธ์—์„œ ํ”ผ์‚ฌ์ฒด์— ํ˜•์šฉ์‚ฌ๋ฅผ ์ถ”๊ฐ€ํ•˜๋ฉด ์ „์ฒด ์ด๋ฏธ์ง€๊ฐ€ ๋ณด์กด๋˜๊ณ  ๋ณ€๊ฒฝ๋œ ํ”ผ์‚ฌ์ฒด๋งŒ ์ˆ˜์ •๋ฉ๋‹ˆ๋‹ค. ๋˜๋Š” ํŠน์ • ํ”ผ์‚ฌ์ฒด์˜ ์ด๋ฏธ์ง€๋ฅผ ๋ณ€ํ˜•ํ•˜๋ฉด ํ”ผ์‚ฌ์ฒด์˜ ํฌ์ฆˆ๊ฐ€ ์œ ์ง€๋ฉ๋‹ˆ๋‹ค. ์ถ”๊ฐ€์ ์œผ๋กœ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€์˜ ํ’ˆ์งˆ์—๋Š” ์˜๋ฏธ ๋ณด์กด ์™ธ์—๋„ ์˜ํ–ฅ์„ ๋ฏธ์น˜๊ณ ์ž ํ•˜๋Š” ํ’ˆ์งˆ์ด ์žˆ์Šต๋‹ˆ๋‹ค. ์ฆ‰, ์ผ๋ฐ˜์ ์œผ๋กœ ๊ฒฐ๊ณผ๋ฌผ์˜ ํ’ˆ์งˆ์ด ์ข‹๊ฑฐ๋‚˜ ํŠน์ • ์Šคํƒ€์ผ์„ ๊ณ ์ˆ˜ํ•˜๊ฑฐ๋‚˜ ์‚ฌ์‹ค์ ์ด๊ธฐ๋ฅผ ์›ํ•ฉ๋‹ˆ๋‹ค. diffusion ๋ชจ๋ธ ์ƒ์„ฑ์„ ์ œ์–ดํ•˜๊ธฐ ์œ„ํ•ด `diffusers`๊ฐ€ ์ง€์›ํ•˜๋Š” ๋ช‡ ๊ฐ€์ง€ ๊ธฐ์ˆ ์„ ๋ฌธ์„œํ™”ํ•ฉ๋‹ˆ๋‹ค. ๋งŽ์€ ๋ถ€๋ถ„์ด ์ตœ์ฒจ๋‹จ ์—ฐ๊ตฌ์ด๋ฉฐ ๋ฏธ๋ฌ˜ํ•œ ์ฐจ์ด๊ฐ€ ์žˆ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ช…ํ™•ํ•œ ์„ค๋ช…์ด ํ•„์š”ํ•˜๊ฑฐ๋‚˜ ์ œ์•ˆ ์‚ฌํ•ญ์ด ์žˆ์œผ๋ฉด ์ฃผ์ €ํ•˜์ง€ ๋งˆ์‹œ๊ณ  [ํฌ๋Ÿผ](https://discuss.huggingface.co/) ๋˜๋Š” [GitHub ์ด์Šˆ](https://github.com/huggingface/diffusers/issues)์—์„œ ํ† ๋ก ์„ ์‹œ์ž‘ํ•˜์„ธ์š”. ์ƒ์„ฑ ์ œ์–ด ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ๊ฐœ๋žต์ ์ธ ์„ค๋ช…๊ณผ ๊ธฐ์ˆ  ๊ฐœ์š”๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ๊ธฐ์ˆ ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ์„ค๋ช…์€ ํŒŒ์ดํ”„๋ผ์ธ์—์„œ ๋งํฌ๋œ ์›๋ณธ ๋…ผ๋ฌธ์„ ์ฐธ์กฐํ•˜๋Š” ๊ฒƒ์ด ๊ฐ€์žฅ ์ข‹์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ ์‚ฌ๋ก€์— ๋”ฐ๋ผ ์ ์ ˆํ•œ ๊ธฐ์ˆ ์„ ์„ ํƒํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋งŽ์€ ๊ฒฝ์šฐ ์ด๋Ÿฌํ•œ ๊ธฐ๋ฒ•์„ ๊ฒฐํ•ฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, ํ…์ŠคํŠธ ๋ฐ˜์ „๊ณผ SEGA๋ฅผ ๊ฒฐํ•ฉํ•˜์—ฌ ํ…์ŠคํŠธ ๋ฐ˜์ „์„ ์‚ฌ์šฉํ•˜์—ฌ ์ƒ์„ฑ๋œ ์ถœ๋ ฅ์— ๋” ๋งŽ์€ ์˜๋ฏธ์  ์ง€์นจ์„ ์ œ๊ณตํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ณ„๋„์˜ ์–ธ๊ธ‰์ด ์—†๋Š” ํ•œ, ์ด๋Ÿฌํ•œ ๊ธฐ๋ฒ•์€ ๊ธฐ์กด ๋ชจ๋ธ๊ณผ ํ•จ๊ป˜ ์ž‘๋™ํ•˜๋ฉฐ ์ž์ฒด ๊ฐ€์ค‘์น˜๊ฐ€ ํ•„์š”ํ•˜์ง€ ์•Š์€ ๊ธฐ๋ฒ•์ž…๋‹ˆ๋‹ค. 1. [Instruct Pix2Pix](#instruct-pix2pix) 2. [Pix2Pix Zero](#pix2pixzero) 3. [Attend and Excite](#attend-and-excite) 4. [Semantic Guidance](#semantic-guidance) 5. [Self-attention Guidance](#self-attention-guidance) 6. [Depth2Image](#depth2image) 7. [MultiDiffusion Panorama](#multidiffusion-panorama) 8. [DreamBooth](#dreambooth) 9. [Textual Inversion](#textual-inversion) 10. [ControlNet](#controlnet) 11. [Prompt Weighting](#prompt-weighting) 12. [Custom Diffusion](#custom-diffusion) 13. [Model Editing](#model-editing) 14. [DiffEdit](#diffedit) 15. [T2I-Adapter](#t2i-adapter) ํŽธ์˜๋ฅผ ์œ„ํ•ด, ์ถ”๋ก ๋งŒ ํ•˜๊ฑฐ๋‚˜ ํŒŒ์ธํŠœ๋‹/ํ•™์Šตํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ํ‘œ๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. 
| **Method** | **Inference only** | **Requires training /<br> fine-tuning** | **Comments** |
| :-------------------------------------------------: | :----------------: | :-------------------------------------: | :---------------------------------------------------------------------------------------------: |
| [Instruct Pix2Pix](#instruct-pix2pix) | โœ… | โŒ | Can additionally be<br>fine-tuned for better <br>performance on specific <br>edit instructions. |
| [Pix2Pix Zero](#pix2pixzero) | โœ… | โŒ | |
| [Attend and Excite](#attend-and-excite) | โœ… | โŒ | |
| [Semantic Guidance](#semantic-guidance) | โœ… | โŒ | |
| [Self-attention Guidance](#self-attention-guidance) | โœ… | โŒ | |
| [Depth2Image](#depth2image) | โœ… | โŒ | |
| [MultiDiffusion Panorama](#multidiffusion-panorama) | โœ… | โŒ | |
| [DreamBooth](#dreambooth) | โŒ | โœ… | |
| [Textual Inversion](#textual-inversion) | โŒ | โœ… | |
| [ControlNet](#controlnet) | โœ… | โŒ | A ControlNet can be <br>trained/fine-tuned on<br>a custom conditioning. |
| [Prompt Weighting](#prompt-weighting) | โœ… | โŒ | |
| [Custom Diffusion](#custom-diffusion) | โŒ | โœ… | |
| [Model Editing](#model-editing) | โœ… | โŒ | |
| [DiffEdit](#diffedit) | โœ… | โŒ | |
| [T2I-Adapter](#t2i-adapter) | โœ… | โŒ | |

## Instruct Pix2Pix

[Paper](https://arxiv.org/abs/2211.09800)

[Instruct Pix2Pix](../api/pipelines/stable_diffusion/pix2pix)๋Š” ์ž…๋ ฅ ์ด๋ฏธ์ง€ ํŽธ์ง‘์„ ์ง€์›ํ•˜๊ธฐ ์œ„ํ•ด Stable Diffusion์—์„œ ๋ฏธ์„ธ ์กฐ์ •๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฏธ์ง€์™€ ํŽธ์ง‘์„ ์„ค๋ช…ํ•˜๋Š” ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ž…๋ ฅ์œผ๋กœ ๋ฐ›์•„ ํŽธ์ง‘๋œ ์ด๋ฏธ์ง€๋ฅผ ์ถœ๋ ฅํ•ฉ๋‹ˆ๋‹ค. Instruct Pix2Pix๋Š” [InstructGPT](https://openai.com/blog/instruction-following/)์™€ ๊ฐ™์€ ํ”„๋กฌํ”„ํŠธ์™€ ์ž˜ ์ž‘๋™ํ•˜๋„๋ก ๋ช…์‹œ์ ์œผ๋กœ ํ›ˆ๋ จ๋˜์—ˆ์Šต๋‹ˆ๋‹ค.

์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/pix2pix)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## Pix2Pix Zero

[Paper](https://arxiv.org/abs/2302.03027)

[Pix2Pix Zero](../api/pipelines/stable_diffusion/pix2pix_zero)๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ์ „๋ฐ˜์ ์ธ ์ด๋ฏธ์ง€์˜ ์˜๋ฏธ๋ฅผ ์œ ์ง€ํ•˜๋ฉด์„œ, ํ•œ ๊ฐœ๋…์ด๋‚˜ ํ”ผ์‚ฌ์ฒด๊ฐ€ ๋‹ค๋ฅธ ๊ฐœ๋…์ด๋‚˜ ํ”ผ์‚ฌ์ฒด๋กœ ๋ณ€ํ™˜๋˜๋„๋ก ์ด๋ฏธ์ง€๋ฅผ ์ˆ˜์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

๋…ธ์ด์ฆˆ ์ œ๊ฑฐ(denoising) ํ”„๋กœ์„ธ์Šค๋Š” ํ•œ ๊ฐœ๋…์  ์ž„๋ฒ ๋”ฉ์—์„œ ๋‹ค๋ฅธ ๊ฐœ๋…์  ์ž„๋ฒ ๋”ฉ์œผ๋กœ ์•ˆ๋‚ด๋ฉ๋‹ˆ๋‹ค. ์ค‘๊ฐ„ latents๋Š” ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค ์ค‘์— ์ตœ์ ํ™”๋˜์–ด ์ฐธ์กฐ ์ฃผ์˜ ์ง€๋„(reference attention maps)๋ฅผ ํ–ฅํ•ด ๋‚˜์•„๊ฐ‘๋‹ˆ๋‹ค. ์ฐธ์กฐ ์ฃผ์˜ ์ง€๋„๋Š” ์ž…๋ ฅ ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค์—์„œ ๋‚˜์˜จ ๊ฒƒ์œผ๋กœ, ์˜๋ฏธ ๋ณด์กด์„ ์žฅ๋ คํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค.

Pix2Pix Zero๋Š” ํ•ฉ์„ฑ ์ด๋ฏธ์ง€์™€ ์‹ค์ œ ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•˜๋Š” ๋ฐ ๋ชจ๋‘ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

- ํ•ฉ์„ฑ ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•˜๋ ค๋ฉด ๋จผ์ € ์บก์…˜์ด ์ง€์ •๋œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ๋‹ค์Œ์œผ๋กœ ํŽธ์ง‘ํ•  ์ปจ์…‰๊ณผ ์ƒˆ๋กœ์šด ํƒ€๊ฒŸ ์ปจ์…‰์— ๋Œ€ํ•œ ์ด๋ฏธ์ง€ ์บก์…˜์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5)์™€ ๊ฐ™์€ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿฐ ๋‹ค์Œ ํ…์ŠคํŠธ ์ธ์ฝ”๋”๋ฅผ ํ†ตํ•ด ์†Œ์Šค ๊ฐœ๋…๊ณผ ๋Œ€์ƒ ๊ฐœ๋… ๋ชจ๋‘์— ๋Œ€ํ•œ "ํ‰๊ท (mean)" ํ”„๋กฌํ”„ํŠธ ์ž„๋ฒ ๋”ฉ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ๋งˆ์ง€๋ง‰์œผ๋กœ, ํ•ฉ์„ฑ ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•˜๊ธฐ ์œ„ํ•ด pix2pix-zero ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.
- ์‹ค์ œ ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•˜๋ ค๋ฉด ๋จผ์ € [BLIP](https://huggingface.co/docs/transformers/model_doc/blip)๊ณผ ๊ฐ™์€ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ์ง€ ์บก์…˜์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
๊ทธ๋Ÿฐ ๋‹ค์Œ ํ”„๋กฌํ”„ํŠธ์™€ ์ด๋ฏธ์ง€์— ddim ๋ฐ˜์ „์„ ์ ์šฉํ•˜์—ฌ "์—ญ(inverse)" latents์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ์ด์ „๊ณผ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ ์†Œ์Šค ๋ฐ ๋Œ€์ƒ ๊ฐœ๋… ๋ชจ๋‘์— ๋Œ€ํ•œ "ํ‰๊ท (mean)" ํ”„๋กฌํ”„ํŠธ ์ž„๋ฒ ๋”ฉ์ด ์ƒ์„ฑ๋˜๊ณ  ๋งˆ์ง€๋ง‰์œผ๋กœ "์—ญ(inverse)" latents์™€ ๊ฒฐํ•ฉ๋œ pix2pix-zero ์•Œ๊ณ ๋ฆฌ์ฆ˜์ด ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. <Tip> Pix2Pix Zero๋Š” '์ œ๋กœ ์ƒท(zero-shot)' ์ด๋ฏธ์ง€ ํŽธ์ง‘์ด ๊ฐ€๋Šฅํ•œ ์ตœ์ดˆ์˜ ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค. ์ฆ‰, ์ด ๋ชจ๋ธ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ผ๋ฐ˜ ์†Œ๋น„์ž์šฉ GPU์—์„œ 1๋ถ„ ์ด๋‚ด์— ์ด๋ฏธ์ง€๋ฅผ ํŽธ์ง‘ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค(../api/pipelines/stable_diffusion/pix2pix_zero#usage-example). </Tip> ์œ„์—์„œ ์–ธ๊ธ‰ํ–ˆ๋“ฏ์ด Pix2Pix Zero์—๋Š” ํŠน์ • ๊ฐœ๋…์œผ๋กœ ์„ธ๋Œ€๋ฅผ ์œ ๋„ํ•˜๊ธฐ ์œ„ํ•ด (UNet, VAE ๋˜๋Š” ํ…์ŠคํŠธ ์ธ์ฝ”๋”๊ฐ€ ์•„๋‹Œ) latents์„ ์ตœ์ ํ™”ํ•˜๋Š” ๊ธฐ๋Šฅ์ด ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.์ฆ‰, ์ „์ฒด ํŒŒ์ดํ”„๋ผ์ธ์— ํ‘œ์ค€ [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img)๋ณด๋‹ค ๋” ๋งŽ์€ ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ ํ•„์š”ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/pix2pix_zero)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## Attend and Excite [Paper](https://arxiv.org/abs/2301.13826) [Attend and Excite](../api/pipelines/stable_diffusion/attend_and_excite)๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ํ”„๋กฌํ”„ํŠธ์˜ ํ”ผ์‚ฌ์ฒด๊ฐ€ ์ตœ์ข… ์ด๋ฏธ์ง€์— ์ถฉ์‹คํ•˜๊ฒŒ ํ‘œํ˜„๋˜๋„๋ก ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฏธ์ง€์— ์กด์žฌํ•ด์•ผ ํ•˜๋Š” ํ”„๋กฌํ”„ํŠธ์˜ ํ”ผ์‚ฌ์ฒด์— ํ•ด๋‹นํ•˜๋Š” ์ผ๋ จ์˜ ํ† ํฐ ์ธ๋ฑ์Šค๊ฐ€ ์ž…๋ ฅ์œผ๋กœ ์ œ๊ณต๋ฉ๋‹ˆ๋‹ค. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ์ค‘์— ๊ฐ ํ† ํฐ ์ธ๋ฑ์Šค๋Š” ์ด๋ฏธ์ง€์˜ ์ตœ์†Œ ํ•œ ํŒจ์น˜ ์ด์ƒ์— ๋Œ€ํ•ด ์ตœ์†Œ ์ฃผ์˜ ์ž„๊ณ„๊ฐ’์„ ๊ฐ–๋„๋ก ๋ณด์žฅ๋ฉ๋‹ˆ๋‹ค. ๋ชจ๋“  ํ”ผ์‚ฌ์ฒด ํ† ํฐ์— ๋Œ€ํ•ด ์ฃผ์˜ ์ž„๊ณ„๊ฐ’์ด ํ†ต๊ณผ๋  ๋•Œ๊นŒ์ง€ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค ์ค‘์— ์ค‘๊ฐ„ ์ž ๋ณต๊ธฐ๊ฐ€ ๋ฐ˜๋ณต์ ์œผ๋กœ ์ตœ์ ํ™”๋˜์–ด ๊ฐ€์žฅ ์†Œํ™€ํžˆ ์ทจ๊ธ‰๋˜๋Š” ํ”ผ์‚ฌ์ฒด ํ† ํฐ์˜ ์ฃผ์˜๋ ฅ์„ ๊ฐ•ํ™”ํ•ฉ๋‹ˆ๋‹ค. Pix2Pix Zero์™€ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ Attend and Excite ์—ญ์‹œ ํŒŒ์ดํ”„๋ผ์ธ์— ๋ฏธ๋‹ˆ ์ตœ์ ํ™” ๋ฃจํ”„(์‚ฌ์ „ ํ•™์Šต๋œ ๊ฐ€์ค‘์น˜๋ฅผ ๊ทธ๋Œ€๋กœ ๋‘” ์ฑ„)๊ฐ€ ํฌํ•จ๋˜๋ฉฐ, ์ผ๋ฐ˜์ ์ธ 'StableDiffusionPipeline'๋ณด๋‹ค ๋” ๋งŽ์€ ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ ํ•„์š”ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/attend_and_excite)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## Semantic Guidance (SEGA) [Paper](https://arxiv.org/abs/2301.12247) ์˜๋ฏธ์œ ๋„(SEGA)๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ์ด๋ฏธ์ง€์—์„œ ํ•˜๋‚˜ ์ด์ƒ์˜ ์ปจ์…‰์„ ์ ์šฉํ•˜๊ฑฐ๋‚˜ ์ œ๊ฑฐํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ปจ์…‰์˜ ๊ฐ•๋„๋„ ์กฐ์ ˆํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ฆ‰, ์Šค๋งˆ์ผ ์ปจ์…‰์„ ์‚ฌ์šฉํ•˜์—ฌ ์ธ๋ฌผ ์‚ฌ์ง„์˜ ์Šค๋งˆ์ผ์„ ์ ์ง„์ ์œผ๋กœ ๋Š˜๋ฆฌ๊ฑฐ๋‚˜ ์ค„์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ถ„๋ฅ˜๊ธฐ ๋ฌด๋ฃŒ ์•ˆ๋‚ด(classifier free guidance)๊ฐ€ ๋นˆ ํ”„๋กฌํ”„ํŠธ ์ž…๋ ฅ์„ ํ†ตํ•ด ์•ˆ๋‚ด๋ฅผ ์ œ๊ณตํ•˜๋Š” ๋ฐฉ์‹๊ณผ ์œ ์‚ฌํ•˜๊ฒŒ, SEGA๋Š” ๊ฐœ๋… ํ”„๋กฌํ”„ํŠธ์— ๋Œ€ํ•œ ์•ˆ๋‚ด๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ๊ฐœ๋… ํ”„๋กฌํ”„ํŠธ๋Š” ์—ฌ๋Ÿฌ ๊ฐœ๋ฅผ ๋™์‹œ์— ์ ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ ๊ฐœ๋… ํ”„๋กฌํ”„ํŠธ๋Š” ์•ˆ๋‚ด๊ฐ€ ๊ธ์ •์ ์œผ๋กœ ์ ์šฉ๋˜๋Š”์ง€ ๋˜๋Š” ๋ถ€์ •์ ์œผ๋กœ ์ ์šฉ๋˜๋Š”์ง€์— ๋”ฐ๋ผ ํ•ด๋‹น ๊ฐœ๋…์„ ์ถ”๊ฐ€ํ•˜๊ฑฐ๋‚˜ ์ œ๊ฑฐํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. Pix2Pix Zero ๋˜๋Š” Attend and Excite์™€ ๋‹ฌ๋ฆฌ SEGA๋Š” ๋ช…์‹œ์ ์ธ ๊ทธ๋ผ๋ฐ์ด์…˜ ๊ธฐ๋ฐ˜ ์ตœ์ ํ™”๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ๋Œ€์‹  ํ™•์‚ฐ ํ”„๋กœ์„ธ์Šค์™€ ์ง์ ‘ ์ƒํ˜ธ ์ž‘์šฉํ•ฉ๋‹ˆ๋‹ค. ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/semantic_stable_diffusion)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. 
## Self-attention Guidance (SAG)

[Paper](https://arxiv.org/abs/2210.00939)

[Self-attention Guidance](../api/pipelines/stable_diffusion/self_attention_guidance)๋Š” ์ด๋ฏธ์ง€์˜ ์ „๋ฐ˜์ ์ธ ํ’ˆ์งˆ์„ ๊ฐœ์„ ํ•ฉ๋‹ˆ๋‹ค.

SAG๋Š” ๊ณ ๋นˆ๋„ ์„ธ๋ถ€ ์ •๋ณด๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ ํ•˜์ง€ ์•Š์€ ์˜ˆ์ธก์—์„œ ์™„์ „ํžˆ ์กฐ๊ฑดํ™”๋œ ์ด๋ฏธ์ง€์— ์ด๋ฅด๊ธฐ๊นŒ์ง€ ๊ฐ€์ด๋“œ๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ๊ณ ๋นˆ๋„ ๋””ํ…Œ์ผ์€ UNet์˜ self-attention ๋งต์—์„œ ์ถ”์ถœ๋ฉ๋‹ˆ๋‹ค.

์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/self_attention_guidance)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## Depth2Image

[Project](https://huggingface.co/stabilityai/stable-diffusion-2-depth)

[Depth2Image](../pipelines/stable_diffusion_2#depthtoimage)๋Š” ํ…์ŠคํŠธ ๊ฐ€์ด๋“œ ๊ธฐ๋ฐ˜ ์ด๋ฏธ์ง€ ๋ณ€ํ˜•์—์„œ ์˜๋ฏธ(semantics)๋ฅผ ๋” ์ž˜ ๋ณด์กดํ•˜๋„๋ก Stable Diffusion์—์„œ ๋ฏธ์„ธ ์กฐ์ •๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์›๋ณธ ์ด๋ฏธ์ง€์˜ ๋‹จ์•ˆ(monocular) ๊นŠ์ด ์ถ”์ •์น˜๋ฅผ ์กฐ๊ฑด์œผ๋กœ ํ•ฉ๋‹ˆ๋‹ค.

์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion_2#depthtoimage)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

<Tip>

InstructPix2Pix์™€ Pix2Pix Zero ๊ฐ™์€ ๋ฐฉ๋ฒ•๋“ค์˜ ์ค‘์š”ํ•œ ์ฐจ์ด์ ์€, ์ „์ž๋Š” ์‚ฌ์ „ ํ•™์Šต๋œ ๊ฐ€์ค‘์น˜๋ฅผ ๋ฏธ์„ธ ์กฐ์ •ํ•˜๋Š” ๋ฐ˜๋ฉด ํ›„์ž๋Š” ๊ทธ๋ ‡์ง€ ์•Š๋‹ค๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ฆ‰, Pix2Pix Zero๋Š” ์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ ๋ชจ๋“  Stable Diffusion ๋ชจ๋ธ์— ๊ทธ๋Œ€๋กœ ์ ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

</Tip>

## MultiDiffusion Panorama

[Paper](https://arxiv.org/abs/2302.08113)

MultiDiffusion์€ ์‚ฌ์ „ ํ•™์Šต๋œ diffusion model์„ ํ†ตํ•ด ์ƒˆ๋กœ์šด ์ƒ์„ฑ ํ”„๋กœ์„ธ์Šค๋ฅผ ์ •์˜ํ•ฉ๋‹ˆ๋‹ค. ์ด ํ”„๋กœ์„ธ์Šค๋Š” ๊ณ ํ’ˆ์งˆ์˜ ๋‹ค์–‘ํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์‰ฝ๊ฒŒ ์ ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ์—ฌ๋Ÿฌ diffusion ์ƒ์„ฑ ๋ฐฉ๋ฒ•์„ ํ•˜๋‚˜๋กœ ๋ฌถ์Šต๋‹ˆ๋‹ค. ๊ฒฐ๊ณผ๋Š” ์›ํ•˜๋Š” ์ข…ํšก๋น„(์˜ˆ: ํŒŒ๋…ธ๋ผ๋งˆ)์™€, ํƒ€์ดํŠธํ•œ ๋ถ„ํ•  ๋งˆ์Šคํฌ์—์„œ ๋ฐ”์šด๋”ฉ ๋ฐ•์Šค์— ์ด๋ฅด๋Š” ๊ณต๊ฐ„ ๊ฐ€์ด๋“œ ์‹ ํ˜ธ ๋“ฑ ์‚ฌ์šฉ์ž๊ฐ€ ์ œ๊ณตํ•œ ์ œ์–ด๋ฅผ ์ค€์ˆ˜ํ•ฉ๋‹ˆ๋‹ค.

[MultiDiffusion ํŒŒ๋…ธ๋ผ๋งˆ](../api/pipelines/stable_diffusion/panorama)๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ์ž„์˜์˜ ์ข…ํšก๋น„(์˜ˆ: ํŒŒ๋…ธ๋ผ๋งˆ)๋กœ ๊ณ ํ’ˆ์งˆ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

ํŒŒ๋…ธ๋ผ๋งˆ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/panorama)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## ๋‚˜๋งŒ์˜ ๋ชจ๋ธ ํŒŒ์ธํŠœ๋‹

์‚ฌ์ „ ํ•™์Šต๋œ ๋ชจ๋ธ ์™ธ์—๋„, Diffusers๋Š” ์‚ฌ์šฉ์ž๊ฐ€ ์ œ๊ณตํ•œ ๋ฐ์ดํ„ฐ์— ๋Œ€ํ•ด ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•  ์ˆ˜ ์žˆ๋Š” ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค.

## DreamBooth

[DreamBooth](../training/dreambooth)๋Š” ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•˜์—ฌ ์ƒˆ๋กœ์šด ์ฃผ์ œ(subject)๋ฅผ ๊ฐ€๋ฅด์นฉ๋‹ˆ๋‹ค. ์ฆ‰, ํ•œ ์‚ฌ๋žŒ์˜ ์‚ฌ์ง„ ๋ช‡ ์žฅ์„ ์‚ฌ์šฉํ•˜์—ฌ ๋‹ค์–‘ํ•œ ์Šคํƒ€์ผ๋กœ ๊ทธ ์‚ฌ๋žŒ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../training/dreambooth)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## Textual Inversion

[Textual Inversion](../training/text_inversion)์€ ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•˜์—ฌ ์ƒˆ๋กœ์šด ๊ฐœ๋…์„ ํ•™์Šต์‹œํ‚ต๋‹ˆ๋‹ค. ์ฆ‰, ํŠน์ • ์Šคํƒ€์ผ์˜ ์•„ํŠธ์›Œํฌ ์‚ฌ์ง„ ๋ช‡ ์žฅ์„ ์‚ฌ์šฉํ•˜์—ฌ ํ•ด๋‹น ์Šคํƒ€์ผ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../training/text_inversion)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## ControlNet

[Paper](https://arxiv.org/abs/2302.05543)

[ControlNet](../api/pipelines/stable_diffusion/controlnet)์€ ์ถ”๊ฐ€ ์กฐ๊ฑด์„ ๋”ํ•˜๋Š” ๋ณด์กฐ(auxiliary) ๋„คํŠธ์›Œํฌ์ž…๋‹ˆ๋‹ค.
๊ฐ€์žฅ์ž๋ฆฌ ๊ฐ์ง€, ๋‚™์„œ, ๊นŠ์ด ๋งต, ์˜๋ฏธ์  ์„ธ๊ทธ๋จผํŠธ์™€ ๊ฐ™์€ ๋‹ค์–‘ํ•œ ์กฐ๊ฑด์— ๋Œ€ํ•ด ํ›ˆ๋ จ๋œ 8๊ฐœ์˜ ํ‘œ์ค€ ์‚ฌ์ „ ํ›ˆ๋ จ๋œ ControlNet์ด ์žˆ์Šต๋‹ˆ๋‹ค, ๊นŠ์ด ๋งต, ์‹œ๋งจํ‹ฑ ์„ธ๊ทธ๋จผํ…Œ์ด์…˜๊ณผ ๊ฐ™์€ ๋‹ค์–‘ํ•œ ์กฐ๊ฑด์œผ๋กœ ํ›ˆ๋ จ๋œ 8๊ฐœ์˜ ํ‘œ์ค€ ์ œ์–ด๋ง์ด ์žˆ์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์—ฌ๊ธฐ](../api/pipelines/stable_diffusion/controlnet)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## Prompt Weighting ํ”„๋กฌํ”„ํŠธ ๊ฐ€์ค‘์น˜๋Š” ํ…์ŠคํŠธ์˜ ํŠน์ • ๋ถ€๋ถ„์— ๋” ๋งŽ์€ ๊ด€์‹ฌ ๊ฐ€์ค‘์น˜๋ฅผ ๋ถ€์—ฌํ•˜๋Š” ๊ฐ„๋‹จํ•œ ๊ธฐ๋ฒ•์ž…๋‹ˆ๋‹ค. ์ž…๋ ฅ์— ๊ฐ€์ค‘์น˜๋ฅผ ๋ถ€์—ฌํ•˜๋Š” ๊ฐ„๋‹จํ•œ ๊ธฐ๋ฒ•์ž…๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…๊ณผ ์˜ˆ์‹œ๋Š” [์—ฌ๊ธฐ](../using-diffusers/weighted_prompts)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## Custom Diffusion [Custom Diffusion](../training/custom_diffusion)์€ ์‚ฌ์ „ ํ•™์Šต๋œ text-to-image ๊ฐ„ ํ™•์‚ฐ ๋ชจ๋ธ์˜ ๊ต์ฐจ ๊ด€์‹ฌ๋„ ๋งต๋งŒ ๋ฏธ์„ธ ์กฐ์ •ํ•ฉ๋‹ˆ๋‹ค. ๋˜ํ•œ textual inversion์„ ์ถ”๊ฐ€๋กœ ์ˆ˜ํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์„ค๊ณ„์ƒ ๋‹ค์ค‘ ๊ฐœ๋… ํ›ˆ๋ จ์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. DreamBooth ๋ฐ Textual Inversion ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ, ์‚ฌ์šฉ์ž ์ง€์ • ํ™•์‚ฐ์€ ์‚ฌ์ „ํ•™์Šต๋œ text-to-image diffusion ๋ชจ๋ธ์— ์ƒˆ๋กœ์šด ๊ฐœ๋…์„ ํ•™์Šต์‹œ์ผœ ๊ด€์‹ฌ ์žˆ๋Š” ๊ฐœ๋…๊ณผ ๊ด€๋ จ๋œ ์ถœ๋ ฅ์„ ์ƒ์„ฑํ•˜๋Š” ๋ฐ์—๋„ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…์€ [๊ณต์‹ ๋ฌธ์„œ](../training/custom_diffusion)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## Model Editing [Paper](https://arxiv.org/abs/2303.08084) [ํ…์ŠคํŠธ-์ด๋ฏธ์ง€ ๋ชจ๋ธ ํŽธ์ง‘ ํŒŒ์ดํ”„๋ผ์ธ](../api/pipelines/model_editing)์„ ์‚ฌ์šฉํ•˜๋ฉด ์‚ฌ์ „ํ•™์Šต๋œ text-to-image diffusion ๋ชจ๋ธ์ด ์ž…๋ ฅ ํ”„๋กฌํ”„ํŠธ์— ์žˆ๋Š” ํ”ผ์‚ฌ์ฒด์— ๋Œ€ํ•ด ๋‚ด๋ฆด ์ˆ˜ ์žˆ๋Š” ์ž˜๋ชป๋œ ์•”์‹œ์  ๊ฐ€์ •์„ ์™„ํ™”ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋ฉ๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, ์•ˆ์ •์  ํ™•์‚ฐ์— "A pack of roses"์— ๋Œ€ํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋ผ๋Š” ๋ฉ”์‹œ์ง€๋ฅผ ํ‘œ์‹œํ•˜๋ฉด ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€์˜ ์žฅ๋ฏธ๋Š” ๋นจ๊ฐ„์ƒ‰์ผ ๊ฐ€๋Šฅ์„ฑ์ด ๋†’์Šต๋‹ˆ๋‹ค. ์ด ํŒŒ์ดํ”„๋ผ์ธ์€ ์ด๋Ÿฌํ•œ ๊ฐ€์ •์„ ๋ณ€๊ฒฝํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋ฉ๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…์€ [๊ณต์‹ ๋ฌธ์„œ](../api/pipelines/model_editing)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## DiffEdit [Paper](https://arxiv.org/abs/2210.11427) [DiffEdit](../api/pipelines/diffedit)๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ์›๋ณธ ์ž…๋ ฅ ์ด๋ฏธ์ง€๋ฅผ ์ตœ๋Œ€ํ•œ ๋ณด์กดํ•˜๋ฉด์„œ ์ž…๋ ฅ ํ”„๋กฌํ”„ํŠธ์™€ ํ•จ๊ป˜ ์ž…๋ ฅ ์ด๋ฏธ์ง€์˜ ์˜๋ฏธ๋ก ์  ํŽธ์ง‘์ด ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ์ž์„ธํ•œ ์„ค๋ช…์€ [๊ณต์‹ ๋ฌธ์„œ](../api/pipelines/diffedit)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## T2I-Adapter [Paper](https://arxiv.org/abs/2302.08453) [T2I-์–ด๋Œ‘ํ„ฐ](../api/pipelines/stable_diffusion/adapter)๋Š” ์ถ”๊ฐ€์ ์ธ ์กฐ๊ฑด์„ ์ถ”๊ฐ€ํ•˜๋Š” auxiliary ๋„คํŠธ์›Œํฌ์ž…๋‹ˆ๋‹ค. ๊ฐ€์žฅ์ž๋ฆฌ ๊ฐ์ง€, ์Šค์ผ€์น˜, depth maps, semantic segmentations์™€ ๊ฐ™์€ ๋‹ค์–‘ํ•œ ์กฐ๊ฑด์— ๋Œ€ํ•ด ํ›ˆ๋ จ๋œ 8๊ฐœ์˜ ํ‘œ์ค€ ์‚ฌ์ „ํ›ˆ๋ จ๋œ adapter๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค, [๊ณต์‹ ๋ฌธ์„œ](api/pipelines/stable_diffusion/adapter)์—์„œ ์‚ฌ์šฉ ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ •๋ณด๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/textual_inversion_inference.md
# Textual inversion

[[open-in-colab]]

[`StableDiffusionPipeline`]์€ textual-inversion์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. textual-inversion์€ ๋ช‡ ๊ฐœ์˜ ์ƒ˜ํ”Œ ์ด๋ฏธ์ง€๋งŒ์œผ๋กœ stable diffusion๊ณผ ๊ฐ™์€ ๋ชจ๋ธ์ด ์ƒˆ๋กœ์šด ์ปจ์…‰์„ ํ•™์Šตํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•˜๋Š” ๊ธฐ๋ฒ•์ž…๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋ฅผ ๋” ์ž˜ ์ œ์–ดํ•˜๊ณ  ํŠน์ • ์ปจ์…‰์— ๋งž๊ฒŒ ๋ชจ๋ธ์„ ์กฐ์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ปค๋ฎค๋‹ˆํ‹ฐ์—์„œ ๋งŒ๋“ค์–ด์ง„ ์ปจ์…‰๋“ค์˜ ์ปฌ๋ ‰์…˜์€ [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer)๋ฅผ ํ†ตํ•ด ๋น ๋ฅด๊ฒŒ ์‚ฌ์šฉํ•ด ๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” Stable Diffusion Conceptualizer์—์„œ ์‚ฌ์ „ํ•™์Šตํ•œ ์ปจ์…‰์„ ์‚ฌ์šฉํ•˜์—ฌ textual-inversion์œผ๋กœ ์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ๋“œ๋ฆฝ๋‹ˆ๋‹ค. textual-inversion์œผ๋กœ ๋ชจ๋ธ์— ์ƒˆ๋กœ์šด ์ปจ์…‰์„ ํ•™์Šต์‹œํ‚ค๋Š” ๋ฐ ๊ด€์‹ฌ์ด ์žˆ์œผ์‹œ๋‹ค๋ฉด, [Textual Inversion](./training/text_inversion) ํ›ˆ๋ จ ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

Hugging Face ๊ณ„์ •์œผ๋กœ ๋กœ๊ทธ์ธํ•˜์„ธ์š”:

```py
from huggingface_hub import notebook_login

notebook_login()
```

ํ•„์š”ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๊ณ , ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋ฅผ ์‹œ๊ฐํ™”ํ•˜๊ธฐ ์œ„ํ•œ ๋„์šฐ๋ฏธ ํ•จ์ˆ˜ `image_grid`๋ฅผ ๋งŒ๋“ญ๋‹ˆ๋‹ค:

```py
import torch
from PIL import Image

from diffusers import StableDiffusionPipeline


def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```

Stable Diffusion ์ฒดํฌํฌ์ธํŠธ์™€, [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer)์—์„œ ์‚ฌ์ „ํ•™์Šต๋œ ์ปจ์…‰์„ ์„ ํƒํ•ฉ๋‹ˆ๋‹ค:

```py
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```

์ด์ œ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋กœ๋“œํ•˜๊ณ , ์‚ฌ์ „ํ•™์Šต๋œ ์ปจ์…‰์„ ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to("cuda")

pipeline.load_textual_inversion(repo_id_embeds)
```

ํŠน๋ณ„ํ•œ placeholder token `<cat-toy>`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์‚ฌ์ „ํ•™์Šต๋œ ์ปจ์…‰์œผ๋กœ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋งŒ๋“ค๊ณ , ์ƒ์„ฑํ•  ์ƒ˜ํ”Œ์˜ ์ˆ˜์™€ ์ด๋ฏธ์ง€ ํ–‰์˜ ์ˆ˜๋ฅผ ์„ ํƒํ•ฉ๋‹ˆ๋‹ค:

```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"

num_samples = 2
num_rows = 2
```

๊ทธ๋Ÿฐ ๋‹ค์Œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•˜๊ณ  ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋“ค์„ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋ฆฌ๊ณ  ์ฒ˜์Œ์— ๋งŒ๋“ค์—ˆ๋˜ ๋„์šฐ๋ฏธ ํ•จ์ˆ˜ `image_grid`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ƒ์„ฑ ๊ฒฐ๊ณผ๋ฅผ ์‹œ๊ฐํ™”ํ•ฉ๋‹ˆ๋‹ค. ์ด๋•Œ `num_inference_steps`์™€ `guidance_scale` ๊ฐ™์€ ๋งค๊ฐœ๋ณ€์ˆ˜๋“ค์„ ์กฐ์ •ํ•˜์—ฌ, ์ด๊ฒƒ๋“ค์ด ์ด๋ฏธ์ง€ ํ’ˆ์งˆ์— ์–ด๋– ํ•œ ์˜ํ–ฅ์„ ๋ฏธ์น˜๋Š”์ง€ ์ž์œ ๋กญ๊ฒŒ ํ™•์ธํ•ด ๋ณด์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค.

```py
all_images = []
for _ in range(num_rows):
    images = pipeline(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=7.5).images
    all_images.extend(images)

grid = image_grid(all_images, num_samples, num_rows)
grid
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>
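Hub ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ๋Œ€์‹  ๋กœ์ปฌ์— ๋‹ค์šด๋กœ๋“œํ•œ ์ž„๋ฒ ๋”ฉ ํŒŒ์ผ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฒƒ๋„ ๊ฐ€๋Šฅํ•œ ๊ฒƒ์œผ๋กœ ์•Œ๋ ค์ ธ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ๊ฐ€์ •์— ๊ธฐ๋ฐ˜ํ•œ ์Šค์ผ€์น˜๋กœ, ํŒŒ์ผ ๊ฒฝ๋กœ(`./learned_embeds.bin`)์™€ ํ† ํฐ ์ด๋ฆ„(`<my-local-concept>`)์€ ์ž„์˜์˜ ์˜ˆ์‹œ์ด๋ฉฐ, ๋ฒ„์ „์— ๋”ฐ๋ผ ์ง€์› ํ˜•์‹์ด ๋‹ค๋ฅผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
# ๊ฐ€์ •: ๋กœ์ปฌ์— learned_embeds.bin ์ž„๋ฒ ๋”ฉ ํŒŒ์ผ์ด ์žˆ๋Š” ๊ฒฝ์šฐ
pipeline.load_textual_inversion("./learned_embeds.bin", token="<my-local-concept>")

prompt = "a photo of <my-local-concept> in the snow"
image = pipeline(prompt, num_inference_steps=50).images[0]
```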
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/img2img.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ํ…์ŠคํŠธ ๊ธฐ๋ฐ˜ image-to-image ์ƒ์„ฑ [[open-in-colab]] [`StableDiffusionImg2ImgPipeline`]์„ ์‚ฌ์šฉํ•˜๋ฉด ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์™€ ์‹œ์ž‘ ์ด๋ฏธ์ง€๋ฅผ ์ „๋‹ฌํ•˜์—ฌ ์ƒˆ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์˜ ์กฐ๊ฑด์„ ์ง€์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‹œ์ž‘ํ•˜๊ธฐ ์ „์— ํ•„์š”ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๊ฐ€ ๋ชจ๋‘ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”: ```bash !pip install diffusers transformers ftfy accelerate ``` [`nitrosocke/Ghibli-Diffusion`](https://huggingface.co/nitrosocke/Ghibli-Diffusion)๊ณผ ๊ฐ™์€ ์‚ฌ์ „ํ•™์Šต๋œ stable diffusion ๋ชจ๋ธ๋กœ [`StableDiffusionImg2ImgPipeline`]์„ ์ƒ์„ฑํ•˜์—ฌ ์‹œ์ž‘ํ•˜์„ธ์š”. ```python import torch import requests from PIL import Image from io import BytesIO from diffusers import StableDiffusionImg2ImgPipeline device = "cuda" pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16).to( device ) ``` ์ดˆ๊ธฐ ์ด๋ฏธ์ง€๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜๊ณ  ์‚ฌ์ „ ์ฒ˜๋ฆฌํ•˜์—ฌ ํŒŒ์ดํ”„๋ผ์ธ์— ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") init_image.thumbnail((768, 768)) init_image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg"/> </div> <Tip> ๐Ÿ’ก `strength`๋Š” ์ž…๋ ฅ ์ด๋ฏธ์ง€์— ์ถ”๊ฐ€๋˜๋Š” ๋…ธ์ด์ฆˆ์˜ ์–‘์„ ์ œ์–ดํ•˜๋Š” 0.0์—์„œ 1.0 ์‚ฌ์ด์˜ ๊ฐ’์ž…๋‹ˆ๋‹ค. 1.0์— ๊ฐ€๊นŒ์šด ๊ฐ’์€ ๋‹ค์–‘ํ•œ ๋ณ€ํ˜•์„ ํ—ˆ์šฉํ•˜์ง€๋งŒ ์ž…๋ ฅ ์ด๋ฏธ์ง€์™€ ์˜๋ฏธ์ ์œผ๋กœ ์ผ์น˜ํ•˜์ง€ ์•Š๋Š” ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. 
</Tip>

ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ •์˜ํ•˜๊ณ (์ง€๋ธŒ๋ฆฌ ์Šคํƒ€์ผ(Ghibli-style)์— ๋งž๊ฒŒ ์กฐ์ •๋œ ์ด ์ฒดํฌํฌ์ธํŠธ์˜ ๊ฒฝ์šฐ ํ”„๋กฌํ”„ํŠธ ์•ž์— `ghibli style` ํ† ํฐ์„ ๋ถ™์—ฌ์•ผ ํ•ฉ๋‹ˆ๋‹ค) ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค:

```python
prompt = "ghibli style, a fantasy landscape with castles"

generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ghibli-castles.png"/>
</div>

๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ๋กœ ์‹คํ—˜ํ•˜์—ฌ ์ถœ๋ ฅ์— ์–ด๋–ค ์˜ํ–ฅ์„ ๋ฏธ์น˜๋Š”์ง€ ํ™•์ธํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
from diffusers import LMSDiscreteScheduler

lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.scheduler = lms

generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lms-ghibli.png"/>
</div>

์•„๋ž˜ Space์—์„œ `strength` ๊ฐ’์„ ๋‹ค๋ฅด๊ฒŒ ์„ค์ •ํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ด ๋ณด์„ธ์š”. `strength`๋ฅผ ๋‚ฎ๊ฒŒ ์„ค์ •ํ•˜๋ฉด ์›๋ณธ ์ด๋ฏธ์ง€์™€ ๋” ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€๊ฐ€ ์ƒ์„ฑ๋˜๋Š” ๊ฒƒ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ž์œ ๋กญ๊ฒŒ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ [`LMSDiscreteScheduler`]๋กœ ์ „ํ™˜ํ•˜์—ฌ ์ถœ๋ ฅ์— ์–ด๋–ค ์˜ํ–ฅ์„ ๋ฏธ์น˜๋Š”์ง€๋„ ํ™•์ธํ•ด ๋ณด์„ธ์š”.

<iframe
	src="https://stevhliu-ghibli-img2img.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
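`strength` ๊ฐ’์„ ์ฝ”๋“œ๋กœ ์ง์ ‘ ๋น„๊ตํ•ด ๋ณด๊ณ  ์‹ถ๋‹ค๋ฉด, ์œ„์—์„œ ๋งŒ๋“  `pipe`, `init_image`, `prompt`๋ฅผ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ๋‹ค์Œ ์Šค์ผ€์น˜๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š”. ๋น„๊ต ๊ฐ’๋“ค์€ ์ž„์˜๋กœ ๊ณ ๋ฅธ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```python
# ๋™์ผํ•œ ์‹œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ strength ๊ฐ’๋งŒ ๋ฐ”๊ฟ” ๊ฐ€๋ฉฐ ๊ฒฐ๊ณผ๋ฅผ ๋น„๊ตํ•ฉ๋‹ˆ๋‹ค.
for strength in [0.3, 0.5, 0.75]:
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipe(
        prompt=prompt, image=init_image, strength=strength, guidance_scale=7.5, generator=generator
    ).images[0]
    image.save(f"ghibli_strength_{strength}.png")
```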
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/loading_overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Overview ๐Ÿงจ Diffusers๋Š” ์ƒ์„ฑ ์ž‘์—…์„ ์œ„ํ•œ ๋‹ค์–‘ํ•œ ํŒŒ์ดํ”„๋ผ์ธ, ๋ชจ๋ธ, ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ์ปดํฌ๋„ŒํŠธ๋ฅผ ์ตœ๋Œ€ํ•œ ๊ฐ„๋‹จํ•˜๊ฒŒ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๋„๋ก ๋‹จ์ผ ํ†ตํ•ฉ ๋ฉ”์„œ๋“œ์ธ `from_pretrained()`๋ฅผ ์ œ๊ณตํ•˜์—ฌ Hugging Face [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) ๋˜๋Š” ๋กœ์ปฌ ๋จธ์‹ ์—์„œ ์ด๋Ÿฌํ•œ ์ปดํฌ๋„ŒํŠธ๋ฅผ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํŒŒ์ดํ”„๋ผ์ธ์ด๋‚˜ ๋ชจ๋ธ์„ ๋กœ๋“œํ•  ๋•Œ๋งˆ๋‹ค, ์ตœ์‹  ํŒŒ์ผ์ด ์ž๋™์œผ๋กœ ๋‹ค์šด๋กœ๋“œ๋˜๊ณ  ์บ์‹œ๋˜๋ฏ€๋กœ, ๋‹ค์Œ์— ํŒŒ์ผ์„ ๋‹ค์‹œ ๋‹ค์šด๋กœ๋“œํ•˜์ง€ ์•Š๊ณ ๋„ ๋น ๋ฅด๊ฒŒ ์žฌ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ์„น์…˜์€ ํŒŒ์ดํ”„๋ผ์ธ ๋กœ๋”ฉ, ํŒŒ์ดํ”„๋ผ์ธ์—์„œ ๋‹ค์–‘ํ•œ ์ปดํฌ๋„ŒํŠธ๋ฅผ ๋กœ๋“œํ•˜๋Š” ๋ฐฉ๋ฒ•, ์ฒดํฌํฌ์ธํŠธ variants๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฐฉ๋ฒ•, ๊ทธ๋ฆฌ๊ณ  ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•ด ์•Œ์•„์•ผ ํ•  ๋ชจ๋“  ๊ฒƒ๋“ค์„ ๋‹ค๋ฃน๋‹ˆ๋‹ค. ๋˜ํ•œ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฐฉ๋ฒ•๊ณผ ์„œ๋กœ ๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์‚ฌ์šฉํ•  ๋•Œ ๋ฐœ์ƒํ•˜๋Š” ์†๋„์™€ ํ’ˆ์งˆ๊ฐ„์˜ ํŠธ๋ ˆ์ด๋“œ ์˜คํ”„๋ฅผ ๋น„๊ตํ•˜๋Š” ๋ฐฉ๋ฒ• ์—ญ์‹œ ๋‹ค๋ฃน๋‹ˆ๋‹ค. ๊ทธ๋ฆฌ๊ณ  ๋งˆ์ง€๋ง‰์œผ๋กœ ๐Ÿงจ Diffusers์™€ ํ•จ๊ป˜ ํŒŒ์ดํ† ์น˜์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก KerasCV ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ณ€ํ™˜ํ•˜๊ณ  ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฐฉ๋ฒ•์„ ์‚ดํŽด๋ด…๋‹ˆ๋‹ค.
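์˜ˆ๋ฅผ ๋“ค์–ด, ์ด ์„น์…˜ ์ „๋ฐ˜์—์„œ ๋‹ค๋ฃจ๋Š” `from_pretrained()` ํ˜ธ์ถœ์€ ๊ฐ€์žฅ ๋‹จ์ˆœํ•œ ํ˜•ํƒœ๋กœ ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค. ์•„๋ž˜ ์ฒดํฌํฌ์ธํŠธ ID์™€ ๋กœ์ปฌ ๊ฒฝ๋กœ๋Š” ์„ค๋ช…์„ ์œ„ํ•œ ์ž„์˜์˜ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```py
from diffusers import DiffusionPipeline

# Hub ์ฒดํฌํฌ์ธํŠธ ID๋กœ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ — ์ฒซ ํ˜ธ์ถœ ์‹œ ํŒŒ์ผ์ด ๋‹ค์šด๋กœ๋“œ๋˜๊ณ  ์บ์‹œ๋ฉ๋‹ˆ๋‹ค.
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# ๊ฐ™์€ ๋ฉ”์„œ๋“œ๋กœ ๋กœ์ปฌ ๊ฒฝ๋กœ์—์„œ๋„ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค (๊ฐ€์ •: ๋กœ์ปฌ์— ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ).
# pipeline = DiffusionPipeline.from_pretrained("./my-local-checkpoint")
```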
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/other-formats.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ๋‹ค์–‘ํ•œ Stable Diffusion ํฌ๋งท ๋ถˆ๋Ÿฌ์˜ค๊ธฐ

Stable Diffusion ๋ชจ๋ธ๋“ค์€ ํ•™์Šต ๋ฐ ์ €์žฅ์— ์‚ฌ์šฉ๋œ ํ”„๋ ˆ์ž„์›Œํฌ์™€ ๋‹ค์šด๋กœ๋“œ ์œ„์น˜์— ๋”ฐ๋ผ ๋‹ค์–‘ํ•œ ํ˜•์‹์œผ๋กœ ์ œ๊ณต๋ฉ๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ํ˜•์‹์„ ๐Ÿค— Diffusers์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ณ€ํ™˜ํ•˜๋ฉด, ์ถ”๋ก ์„ ์œ„ํ•œ [๋‹ค์–‘ํ•œ ์Šค์ผ€์ค„๋Ÿฌ ์‚ฌ์šฉ](schedulers), ์‚ฌ์šฉ์ž ์ง€์ • ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์ถ•, ์ถ”๋ก  ์†๋„ ์ตœ์ ํ™”๋ฅผ ์œ„ํ•œ ๋‹ค์–‘ํ•œ ๊ธฐ๋ฒ•๊ณผ ๋ฐฉ๋ฒ• ๋“ฑ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์—์„œ ์ง€์›ํ•˜๋Š” ๋ชจ๋“  ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

<Tip>

์šฐ๋ฆฌ๋Š” `.safetensors` ํ˜•์‹์„ ์ถ”์ฒœํ•ฉ๋‹ˆ๋‹ค. ๊ธฐ์กด์˜ pickled ํŒŒ์ผ์€ ์ทจ์•ฝํ•˜๊ณ  ๋จธ์‹ ์—์„œ ์ฝ”๋“œ๋ฅผ ์‹คํ–‰ํ•  ๋•Œ ์•…์šฉ๋  ์ˆ˜ ์žˆ๋Š” ๋ฐ˜๋ฉด, `.safetensors`๋Š” ํ›จ์”ฌ ๋” ์•ˆ์ „ํ•˜๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. (safetensors ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ๊ฐ€์ด๋“œ์—์„œ ์ž์„ธํžˆ ์•Œ์•„๋ณด์„ธ์š”.)

</Tip>

์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” ๋‹ค๋ฅธ Stable Diffusion ํ˜•์‹์„ ๐Ÿค— Diffusers์™€ ํ˜ธํ™˜๋˜๋„๋ก ๋ณ€ํ™˜ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์„ค๋ช…ํ•ฉ๋‹ˆ๋‹ค.

## PyTorch .ckpt

์ฒดํฌํฌ์ธํŠธ, ์ฆ‰ `.ckpt` ํ˜•์‹์€ ์ผ๋ฐ˜์ ์œผ๋กœ ๋ชจ๋ธ์„ ์ €์žฅํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. `.ckpt` ํŒŒ์ผ์€ ์ „์ฒด ๋ชจ๋ธ์„ ํฌํ•จํ•˜๋ฉฐ ์ผ๋ฐ˜์ ์œผ๋กœ ํฌ๊ธฐ๊ฐ€ ๋ช‡ GB์ž…๋‹ˆ๋‹ค. `.ckpt` ํŒŒ์ผ์€ [`~StableDiffusionPipeline.from_ckpt`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ง์ ‘ ๋ถˆ๋Ÿฌ์™€ ์‚ฌ์šฉํ•  ์ˆ˜๋„ ์žˆ์ง€๋งŒ, ๋‘ ๊ฐ€์ง€ ํ˜•์‹์„ ๋ชจ๋‘ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก `.ckpt` ํŒŒ์ผ์„ ๐Ÿค— Diffusers ํ˜•์‹์œผ๋กœ ๋ณ€ํ™˜ํ•ด ๋‘๋Š” ๊ฒƒ์ด ์ผ๋ฐ˜์ ์œผ๋กœ ๋” ์ข‹์Šต๋‹ˆ๋‹ค.

`.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•˜๋Š” ๋ฐฉ๋ฒ•์€ ๋‘ ๊ฐ€์ง€์ž…๋‹ˆ๋‹ค. Space๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ณ€ํ™˜ํ•˜๊ฑฐ๋‚˜, ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ `.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•ฉ๋‹ˆ๋‹ค.

### Space๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ

`.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•˜๋Š” ๊ฐ€์žฅ ์‰ฝ๊ณ  ํŽธ๋ฆฌํ•œ ๋ฐฉ๋ฒ•์€ SD to Diffusers Space๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. Space์˜ ์ง€์นจ์— ๋”ฐ๋ผ `.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ด ์ ‘๊ทผ ๋ฐฉ์‹์€ ๊ธฐ๋ณธ ๋ชจ๋ธ์—์„œ๋Š” ์ž˜ ์ž‘๋™ํ•˜์ง€๋งŒ, ์‚ฌ์šฉ์ž ์ •์˜๊ฐ€ ๋งŽ์ด ๋“ค์–ด๊ฐ„ ๋ชจ๋ธ์—์„œ๋Š” ์–ด๋ ค์›€์„ ๊ฒช์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋นˆ pull request๋‚˜ ์˜ค๋ฅ˜๋ฅผ ๋ฐ˜ํ™˜ํ•œ๋‹ค๋ฉด Space๊ฐ€ ์‹คํŒจํ•œ ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด ๊ฒฝ์šฐ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ `.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•ด ๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

### ์Šคํฌ๋ฆฝํŠธ๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ

๐Ÿค— Diffusers๋Š” `.ckpt` ํŒŒ์ผ ๋ณ€ํ™˜์„ ์œ„ํ•œ ๋ณ€ํ™˜ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. ์ด ์ ‘๊ทผ ๋ฐฉ์‹์€ ์œ„์˜ Space๋ณด๋‹ค ๋” ์•ˆ์ •์ ์ž…๋‹ˆ๋‹ค.

์‹œ์ž‘ํ•˜๊ธฐ ์ „์—, ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•  ๐Ÿค— Diffusers์˜ ๋กœ์ปฌ ํด๋ก (clone)์ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜๊ณ , pull request๋ฅผ ์—ด๊ณ  ๋ณ€ํ™˜๋œ ๋ชจ๋ธ์„ Hub์— ํ‘ธ์‹œํ•  ์ˆ˜ ์žˆ๋„๋ก Hugging Face ๊ณ„์ •์— ๋กœ๊ทธ์ธํ•˜์„ธ์š”.

```bash
huggingface-cli login
```

์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‚ฌ์šฉํ•˜๋ ค๋ฉด:

1. ๋ณ€ํ™˜ํ•˜๋ ค๋Š” `.ckpt` ํŒŒ์ผ์ด ํฌํ•จ๋œ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋ฅผ Git์œผ๋กœ ํด๋ก (clone)ํ•ฉ๋‹ˆ๋‹ค. ์ด ์˜ˆ์ œ์—์„œ๋Š” TemporalNet `.ckpt` ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค:

```bash
git lfs install
git clone https://huggingface.co/CiaraRowles/TemporalNet
```

2.
์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ณ€ํ™˜ํ•  ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์—์„œ pull request๋ฅผ ์—ฝ๋‹ˆ๋‹ค: ```bash cd TemporalNet && git fetch origin refs/pr/13:pr/13 git checkout pr/13 ``` 3. ๋ณ€ํ™˜ ์Šคํฌ๋ฆฝํŠธ์—์„œ ๊ตฌ์„ฑํ•  ์ž…๋ ฅ ์ธ์ˆ˜๋Š” ์—ฌ๋Ÿฌ ๊ฐ€์ง€๊ฐ€ ์žˆ์ง€๋งŒ ๊ฐ€์žฅ ์ค‘์š”ํ•œ ์ธ์ˆ˜๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค: - `checkpoint_path`: ๋ณ€ํ™˜ํ•  `.ckpt` ํŒŒ์ผ์˜ ๊ฒฝ๋กœ๋ฅผ ์ž…๋ ฅํ•ฉ๋‹ˆ๋‹ค. - `original_config_file`: ์›๋ž˜ ์•„ํ‚คํ…์ฒ˜์˜ ๊ตฌ์„ฑ์„ ์ •์˜ํ•˜๋Š” YAML ํŒŒ์ผ์ž…๋‹ˆ๋‹ค. ์ด ํŒŒ์ผ์„ ์ฐพ์„ ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ `.ckpt` ํŒŒ์ผ์„ ์ฐพ์€ GitHub ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์—์„œ YAML ํŒŒ์ผ์„ ๊ฒ€์ƒ‰ํ•ด ๋ณด์„ธ์š”. - `dump_path`: ๋ณ€ํ™˜๋œ ๋ชจ๋ธ์˜ ๊ฒฝ๋กœ ์˜ˆ๋ฅผ ๋“ค์–ด, TemporalNet ๋ชจ๋ธ์€ Stable Diffusion v1.5 ๋ฐ ControlNet ๋ชจ๋ธ์ด๊ธฐ ๋•Œ๋ฌธ์— ControlNet ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์—์„œ cldm_v15.yaml ํŒŒ์ผ์„ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 4. ์ด์ œ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜์—ฌ .ckpt ํŒŒ์ผ์„ ๋ณ€ํ™˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet ``` 5. ๋ณ€ํ™˜์ด ์™„๋ฃŒ๋˜๋ฉด ๋ณ€ํ™˜๋œ ๋ชจ๋ธ์„ ์—…๋กœ๋“œํ•˜๊ณ  ๊ฒฐ๊ณผ๋ฌผ์„ pull requestย [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)๋ฅผ ํ…Œ์ŠคํŠธํ•˜์„ธ์š”! ```bash git push origin pr/13:refs/pr/13 ``` ## **Keras .pb or .h5** ๐Ÿงช ์ด ๊ธฐ๋Šฅ์€ ์‹คํ—˜์ ์ธ ๊ธฐ๋Šฅ์ž…๋‹ˆ๋‹ค. ํ˜„์žฌ๋กœ์„œ๋Š” Stable Diffusion v1 ์ฒดํฌํฌ์ธํŠธ๋งŒ ๋ณ€ํ™˜ KerasCV Space์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. [KerasCV](https://keras.io/keras_cv/)๋Š” [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion)ย  v1 ๋ฐ v2์— ๋Œ€ํ•œ ํ•™์Šต์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ ์ถ”๋ก  ๋ฐ ๋ฐฐํฌ๋ฅผ ์œ„ํ•œ Stable Diffusion ๋ชจ๋ธ ์‹คํ—˜์„ ์ œํ•œ์ ์œผ๋กœ ์ง€์›ํ•˜๋Š” ๋ฐ˜๋ฉด, ๐Ÿค— Diffusers๋Š” ๋‹ค์–‘ํ•œ [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers),ย [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), andย [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16) ๋“ฑ ์ด๋Ÿฌํ•œ ๋ชฉ์ ์„ ์œ„ํ•œ ๋ณด๋‹ค ์™„๋ฒฝํ•œ ๊ธฐ๋Šฅ์„ ๊ฐ–์ถ”๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers)ย Space ๋ณ€ํ™˜์€ `.pb`ย ๋˜๋Š”ย `.h5`์„ PyTorch๋กœ ๋ณ€ํ™˜ํ•œ ๋‹ค์Œ, ์ถ”๋ก ํ•  ์ˆ˜ ์žˆ๋„๋ก [`StableDiffusionPipeline`] ์œผ๋กœ ๊ฐ์‹ธ์„œ ์ค€๋น„ํ•ฉ๋‹ˆ๋‹ค. ๋ณ€ํ™˜๋œ ์ฒดํฌํฌ์ธํŠธ๋Š” Hugging Face Hub์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์— ์ €์žฅ๋ฉ๋‹ˆ๋‹ค. ์˜ˆ์ œ๋กœ, textual-inversion์œผ๋กœ ํ•™์Šต๋œ `[sayakpaul/textual-inversion-kerasio](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main)`ย ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ณ€ํ™˜ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ์ด๊ฒƒ์€ ํŠน์ˆ˜ ํ† ํฐ ย `<my-funny-cat>`์„ ์‚ฌ์šฉํ•˜์—ฌ ๊ณ ์–‘์ด๋กœ ์ด๋ฏธ์ง€๋ฅผ ๊ฐœ์ธํ™”ํ•ฉ๋‹ˆ๋‹ค. KerasCV Space ๋ณ€ํ™˜์—์„œ๋Š” ๋‹ค์Œ์„ ์ž…๋ ฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: - Hugging Face ํ† ํฐ. - UNet ๊ณผ ํ…์ŠคํŠธ ์ธ์ฝ”๋”(text encoder) ๊ฐ€์ค‘์น˜๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜๋Š” ๊ฒฝ๋กœ์ž…๋‹ˆ๋‹ค. ๋ชจ๋ธ์„ ์–ด๋–ป๊ฒŒ ํ•™์Šตํ• ์ง€ ๋ฐฉ์‹์— ๋”ฐ๋ผ, UNet๊ณผ ํ…์ŠคํŠธ ์ธ์ฝ”๋”์˜ ๊ฒฝ๋กœ๋ฅผ ๋ชจ๋‘ ์ œ๊ณตํ•  ํ•„์š”๋Š” ์—†์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, textual-inversion์—๋Š” ํ…์ŠคํŠธ ์ธ์ฝ”๋”์˜ ์ž„๋ฒ ๋”ฉ๋งŒ ํ•„์š”ํ•˜๊ณ  ํ…์ŠคํŠธ-์ด๋ฏธ์ง€(text-to-image) ๋ชจ๋ธ ๋ณ€ํ™˜์—๋Š” UNet ๊ฐ€์ค‘์น˜๋งŒ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. - Placeholder ํ† ํฐ์€ textual-inversion ๋ชจ๋ธ์—๋งŒ ์ ์šฉ๋ฉ๋‹ˆ๋‹ค. - `output_repo_prefix`๋Š” ๋ณ€ํ™˜๋œ ๋ชจ๋ธ์ด ์ €์žฅ๋˜๋Š” ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์˜ ์ด๋ฆ„์ž…๋‹ˆ๋‹ค. 
**Submit**(์ œ์ถœ) ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜๋ฉด KerasCV ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์ž๋™์œผ๋กœ ๋ณ€ํ™˜๋ฉ๋‹ˆ๋‹ค! ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์„ฑ๊ณต์ ์œผ๋กœ ๋ณ€ํ™˜๋˜๋ฉด, ๋ณ€ํ™˜๋œ ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ํฌํ•จ๋œ ์ƒˆ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋กœ ์—ฐ๊ฒฐ๋˜๋Š” ๋งํฌ๊ฐ€ ํ‘œ์‹œ๋ฉ๋‹ˆ๋‹ค. ์ƒˆ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋กœ ์—ฐ๊ฒฐ๋˜๋Š” ๋งํฌ๋ฅผ ๋”ฐ๋ผ๊ฐ€๋ฉด, ๋ณ€ํ™˜๋œ ๋ชจ๋ธ์„ ๋ฐ”๋กœ ์‚ฌ์šฉํ•ด ๋ณผ ์ˆ˜ ์žˆ๋Š” ์ถ”๋ก  ์œ„์ ฏ์ด ๋‹ด๊ธด ๋ชจ๋ธ ์นด๋“œ๊ฐ€ ํฌํ•จ๋œ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ฝ”๋“œ๋กœ ์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋ ค๋ฉด, ๋ชจ๋ธ ์นด๋“œ์˜ ์˜ค๋ฅธ์ชฝ ์ƒ๋‹จ ๋ชจ์„œ๋ฆฌ์— ์žˆ๋Š” **Use in Diffusers** ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜์—ฌ ์˜ˆ์‹œ ์ฝ”๋“œ๋ฅผ ๋ณต์‚ฌํ•ด ๋ถ™์—ฌ๋„ฃ์Šต๋‹ˆ๋‹ค:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
```

๊ทธ๋Ÿฌ๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
pipeline.to("cuda")

placeholder_token = "<my-funny-cat-token>"
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
image = pipeline(prompt, num_inference_steps=50).images[0]
```

## **A1111 LoRA files**

[Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui)(A1111)์€ Stable Diffusion์„ ์œ„ํ•ด ๋„๋ฆฌ ์‚ฌ์šฉ๋˜๋Š” ์›น UI๋กœ, [Civitai](https://civitai.com/)์™€ ๊ฐ™์€ ๋ชจ๋ธ ๊ณต์œ  ํ”Œ๋žซํผ์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. ํŠนํžˆ LoRA ๊ธฐ๋ฒ•์œผ๋กœ ํ•™์Šต๋œ ๋ชจ๋ธ์€ ํ•™์Šต ์†๋„๊ฐ€ ๋น ๋ฅด๊ณ  ์™„์ „ํžˆ ํŒŒ์ธํŠœ๋‹๋œ ๋ชจ๋ธ๋ณด๋‹ค ํŒŒ์ผ ํฌ๊ธฐ๊ฐ€ ํ›จ์”ฌ ์ž‘๊ธฐ ๋•Œ๋ฌธ์— ์ธ๊ธฐ๊ฐ€ ๋†’์Šต๋‹ˆ๋‹ค.

๐Ÿค— Diffusers๋Š” [`~loaders.LoraLoaderMixin.load_lora_weights`]๋ฅผ ์‚ฌ์šฉํ•œ A1111 LoRA ์ฒดํฌํฌ์ธํŠธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ๋ฅผ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค:

```py
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
```

Civitai์—์„œ LoRA ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜์„ธ์š”. ์ด ์˜ˆ์ œ์—์„œ๋Š” [Howls Moving Castle,Interior/Scenery LoRA (Ghibli Stlye)](https://civitai.com/models/14605?modelVersionId=19998) ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์‚ฌ์šฉํ–ˆ์ง€๋งŒ, ์–ด๋–ค LoRA ์ฒดํฌํฌ์ธํŠธ๋“  ์ž์œ ๋กญ๊ฒŒ ์‚ฌ์šฉํ•ด ๋ณด์„ธ์š”!
```bash
!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
```

[`~loaders.LoraLoaderMixin.load_lora_weights`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ดํ”„๋ผ์ธ์— LoRA ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค:

```py
pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
```

์ด์ œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

images = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    num_inference_steps=25,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0),
).images
```

๋งˆ์ง€๋ง‰์œผ๋กœ, ๋””์Šคํ”Œ๋ ˆ์ด์— ์ด๋ฏธ์ง€๋ฅผ ํ‘œ์‹œํ•˜๋Š” ํ—ฌํผ ํ•จ์ˆ˜๋ฅผ ๋งŒ๋“ญ๋‹ˆ๋‹ค:

```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    # ์ „๋‹ฌ๋ฐ›์€ ์ด๋ฏธ์ง€๋“ค์„ rows x cols ๊ฒฉ์ž ํ˜•ํƒœ์˜ ์ด๋ฏธ์ง€ ํ•œ ์žฅ์œผ๋กœ ์ด์–ด ๋ถ™์ž…๋‹ˆ๋‹ค
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


image_grid(images)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png" />
</div>
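๋…ธํŠธ๋ถ ํ™˜๊ฒฝ์ด ์•„๋‹ˆ๋ผ์„œ ๊ทธ๋ฆฌ๋“œ๊ฐ€ ๋ฐ”๋กœ ํ‘œ์‹œ๋˜์ง€ ์•Š๋Š”๋‹ค๋ฉด, ๊ฒฐ๊ณผ๋ฅผ ํŒŒ์ผ๋กœ ์ €์žฅํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์œ„์—์„œ ์ •์˜ํ•œ `image_grid` ํ—ฌํผ์™€ `images` ๋ฆฌ์ŠคํŠธ๋ฅผ ๊ทธ๋Œ€๋กœ ์žฌ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ด๋ฉฐ, ํŒŒ์ผ ์ด๋ฆ„์€ ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```py
# ์œ„์—์„œ ์ •์˜ํ•œ image_grid ํ—ฌํผ์™€ images ๋ฆฌ์ŠคํŠธ๋ฅผ ์žฌ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค
grid = image_grid(images, rows=2, cols=2)

# image_grid๋Š” PIL.Image ๊ฐ์ฒด๋ฅผ ๋ฐ˜ํ™˜ํ•˜๋ฏ€๋กœ save()๋กœ ๋ฐ”๋กœ ์ €์žฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค
# (ํŒŒ์ผ ์ด๋ฆ„์€ ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค)
grid.save("howls_moving_castle_grid.png")
```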
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ์ปค์Šคํ…€ ํŒŒ์ดํ”„๋ผ์ธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ

[[open-in-colab]]

์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์€ ๋…ผ๋ฌธ์— ๋ช…์‹œ๋œ ์›๋ž˜์˜ ๊ตฌํ˜„์ฒด์™€ ๋‹ค๋ฅธ ํ˜•ํƒœ๋กœ ๊ตฌํ˜„๋œ ๋ชจ๋“  [`DiffusionPipeline`] ํด๋ž˜์Šค๋ฅผ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค(์˜ˆ๋ฅผ ๋“ค์–ด [`StableDiffusionControlNetPipeline`]์€ ["Text-to-Image Generation with ControlNet Conditioning"](https://arxiv.org/abs/2302.05543) ๋…ผ๋ฌธ์— ํ•ด๋‹นํ•ฉ๋‹ˆ๋‹ค). ์ด๋“ค์€ ์ถ”๊ฐ€ ๊ธฐ๋Šฅ์„ ์ œ๊ณตํ•˜๊ฑฐ๋‚˜ ํŒŒ์ดํ”„๋ผ์ธ์˜ ์›๋ž˜ ๊ตฌํ˜„์„ ํ™•์žฅํ•ฉ๋‹ˆ๋‹ค.

[Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) ๋˜๋Š” [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion)๊ณผ ๊ฐ™์€ ๋ฉ‹์ง„ ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์ด ๋งŽ์ด ์žˆ์œผ๋ฉฐ, [์—ฌ๊ธฐ์—์„œ](https://github.com/huggingface/diffusers/tree/main/examples/community) ๋ชจ๋“  ๊ณต์‹ ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

ํ—ˆ๋ธŒ์—์„œ ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋กœ๋“œํ•˜๋ ค๋ฉด, ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID์™€ (ํŒŒ์ดํ”„๋ผ์ธ ๊ฐ€์ค‘์น˜ ๋ฐ ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๋กœ๋“œํ•˜๋ ค๋Š”) ๋ชจ๋ธ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID๋ฅผ ์ธ์ž๋กœ ์ „๋‹ฌํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, ์•„๋ž˜ ์˜ˆ์‹œ์—์„œ๋Š” `hf-internal-testing/diffusers-dummy-pipeline`์—์„œ ๋”๋ฏธ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๊ณ , `google/ddpm-cifar10-32`์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์˜ ๊ฐ€์ค‘์น˜์™€ ์ปดํฌ๋„ŒํŠธ๋“ค์„ ๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค.

<Tip warning={true}>

๐Ÿ”’ ํ—ˆ๊น…ํŽ˜์ด์Šค ํ—ˆ๋ธŒ์—์„œ ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฒƒ์€ ๊ณง ํ•ด๋‹น ์ฝ”๋“œ๊ฐ€ ์•ˆ์ „ํ•˜๋‹ค๊ณ  ์‹ ๋ขฐํ•œ๋‹ค๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ฝ”๋“œ๋ฅผ ์ž๋™์œผ๋กœ ๋ถˆ๋Ÿฌ์™€ ์‹คํ–‰ํ•˜๊ธฐ์— ์•ž์„œ, ๋ฐ˜๋“œ์‹œ ์˜จ๋ผ์ธ์—์„œ ํ•ด๋‹น ์ฝ”๋“œ์˜ ์‹ ๋ขฐ์„ฑ์„ ๊ฒ€์‚ฌํ•˜์„ธ์š”!

</Tip>

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
)
```

๊ณต์‹ ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฒƒ์€ ๋น„์Šทํ•˜์ง€๋งŒ, ๊ณต์‹ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID์—์„œ ๊ฐ€์ค‘์น˜๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฒƒ๊ณผ ๋”๋ถˆ์–ด ํ•ด๋‹น ํŒŒ์ดํ”„๋ผ์ธ ๋‚ด์˜ ์ปดํฌ๋„ŒํŠธ๋ฅผ ์ง์ ‘ ์ง€์ •ํ•˜๋Š” ๊ฒƒ ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ์•„๋ž˜ ์˜ˆ์ œ๋ฅผ ๋ณด๋ฉด ์ปค๋ฎค๋‹ˆํ‹ฐ [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) ํŒŒ์ดํ”„๋ผ์ธ์„ ๋กœ๋“œํ•  ๋•Œ, ํ•ด๋‹น ํŒŒ์ดํ”„๋ผ์ธ์—์„œ ์‚ฌ์šฉํ•  `clip_model` ์ปดํฌ๋„ŒํŠธ์™€ `feature_extractor` ์ปดํฌ๋„ŒํŠธ๋ฅผ ์ง์ ‘ ์„ค์ •ํ•˜๋Š” ๊ฒƒ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
```py
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id)

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
)
```

์ด๋ ‡๊ฒŒ ๊ตฌ์„ฑํ•œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹ค์ œ๋กœ ํ˜ธ์ถœํ•˜๋Š” ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜๋Š” ์ด ๋ฌธ์„œ ๋ง๋ฏธ์—์„œ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/custom_pipeline_examples) ๊ฐ€์ด๋“œ๋ฅผ ์‚ดํŽด๋ณด์„ธ์š”. ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ ๋“ฑ๋ก์— ๊ด€์‹ฌ์ด ์žˆ๋Š” ๊ฒฝ์šฐ [์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์— ๊ธฐ์—ฌํ•˜๋Š” ๋ฐฉ๋ฒ•](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/contribute_pipeline)์— ๋Œ€ํ•œ ๊ฐ€์ด๋“œ๋ฅผ ํ™•์ธํ•˜์„ธ์š”!
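์œ„์—์„œ ๊ตฌ์„ฑํ•œ CLIP Guided Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์€ ์ผ๋ฐ˜ ํŒŒ์ดํ”„๋ผ์ธ์ฒ˜๋Ÿผ ํ˜ธ์ถœํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๋ณด์—ฌ์ฃผ๋Š” ์ตœ์†Œํ•œ์˜ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ์™€ ํŒŒ์ผ ์ด๋ฆ„์€ ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ด๊ณ , `clip_guidance_scale`์€ ํ•ด๋‹น ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌํ˜„์ด ๋ฐ›๋Š”๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ์ธ์ž์ด๋ฏ€๋กœ, ์ •ํ™•ํ•œ ์ธ์ž ๋ชฉ๋ก์€ ํ•ด๋‹น ํŒŒ์ดํ”„๋ผ์ธ ์†Œ์Šค ์ฝ”๋“œ๋ฅผ ํ™•์ธํ•˜์„ธ์š”:

```py
# ์œ„์—์„œ ๋งŒ๋“  pipeline ๊ฐ์ฒด๋ฅผ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค
pipeline = pipeline.to("cuda")

image = pipeline(
    "fantasy forest landscape, golden hour",  # ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ ํ”„๋กฌํ”„ํŠธ
    num_inference_steps=50,
    clip_guidance_scale=100,  # ์ด ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์ด ์ง€์›ํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ CLIP ๊ฐ€์ด๋“œ ๊ฐ•๋„
).images[0]
image.save("clip_guided_example.png")  # ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ ํŒŒ์ผ ์ด๋ฆ„
```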
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ํŒŒ์ดํ”„๋ผ์ธ, ๋ชจ๋ธ ๋ฐ ์Šค์ผ€์ค„๋Ÿฌ ์ดํ•ดํ•˜๊ธฐ

[[open-in-colab]]

๐Ÿงจ Diffusers๋Š” ์‚ฌ์šฉ์ž ์นœํ™”์ ์ด๋ฉฐ ์œ ์—ฐํ•œ ๋„๊ตฌ ์ƒ์ž๋กœ, ์‚ฌ์šฉ ์‚ฌ๋ก€์— ๋งž๊ฒŒ diffusion ์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ํ•  ์ˆ˜ ์žˆ๋„๋ก ์„ค๊ณ„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ด ๋„๊ตฌ ์ƒ์ž์˜ ํ•ต์‹ฌ์€ ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ์ž…๋‹ˆ๋‹ค. [`DiffusionPipeline`]์€ ํŽธ์˜๋ฅผ ์œ„ํ•ด ์ด๋Ÿฌํ•œ ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๋ฒˆ๋“ค๋กœ ์ œ๊ณตํ•˜์ง€๋งŒ, ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถ„๋ฆฌํ•˜๊ณ  ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๊ฐœ๋ณ„์ ์œผ๋กœ ์‚ฌ์šฉํ•ด ์ƒˆ๋กœ์šด diffusion ์‹œ์Šคํ…œ์„ ๋งŒ๋“ค ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค.

์ด ํŠœํ† ๋ฆฌ์–ผ์—์„œ๋Š” ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ๋ถ€ํ„ฐ ์‹œ์ž‘ํ•ด Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ๊นŒ์ง€ ์ง„ํ–‰ํ•˜๋ฉฐ, ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์‚ฌ์šฉํ•ด ์ถ”๋ก ์„ ์œ„ํ•œ diffusion ์‹œ์Šคํ…œ์„ ์กฐ๋ฆฝํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ฐฐ์›๋‹ˆ๋‹ค.

## ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ ํ•ด์ฒดํ•˜๊ธฐ

ํŒŒ์ดํ”„๋ผ์ธ์€ ์ถ”๋ก ์„ ์œ„ํ•ด ๋ชจ๋ธ์„ ์‹คํ–‰ํ•˜๋Š” ๋น ๋ฅด๊ณ  ์‰ฌ์šด ๋ฐฉ๋ฒ•์œผ๋กœ, ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์ฝ”๋“œ๊ฐ€ 4์ค„ ์ด์ƒ ํ•„์š”ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค:

```py
>>> from diffusers import DDPMPipeline

>>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")
>>> image = ddpm(num_inference_steps=25).images[0]
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/>
</div>

์ •๋ง ์‰ฝ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿฐ๋ฐ ํŒŒ์ดํ”„๋ผ์ธ์€ ์–ด๋–ป๊ฒŒ ์ด๋ ‡๊ฒŒ ํ•  ์ˆ˜ ์žˆ์—ˆ์„๊นŒ์š”? ํŒŒ์ดํ”„๋ผ์ธ์„ ์„ธ๋ถ„ํ™”ํ•˜์—ฌ ๋‚ด๋ถ€์—์„œ ์–ด๋–ค ์ผ์ด ์ผ์–ด๋‚˜๊ณ  ์žˆ๋Š”์ง€ ์‚ดํŽด๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค.

์œ„ ์˜ˆ์‹œ์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์—๋Š” [`UNet2DModel`] ๋ชจ๋ธ๊ณผ [`DDPMScheduler`]๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. ํŒŒ์ดํ”„๋ผ์ธ์€ ์›ํ•˜๋Š” ์ถœ๋ ฅ ํฌ๊ธฐ์˜ ๋žœ๋ค ๋…ธ์ด์ฆˆ๋ฅผ ๋ฐ›์•„ ๋ชจ๋ธ์„ ์—ฌ๋Ÿฌ ๋ฒˆ ํ†ต๊ณผ์‹œ์ผœ ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ๋ฅผ ์ œ๊ฑฐํ•ฉ๋‹ˆ๋‹ค. ๊ฐ timestep์—์„œ ๋ชจ๋ธ์€ *noise residual*์„ ์˜ˆ์ธกํ•˜๊ณ , ์Šค์ผ€์ค„๋Ÿฌ๋Š” ์ด๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ํŒŒ์ดํ”„๋ผ์ธ์€ ์ง€์ •๋œ ์ถ”๋ก  ์Šคํ… ์ˆ˜์— ๋„๋‹ฌํ•  ๋•Œ๊นŒ์ง€ ์ด ๊ณผ์ •์„ ๋ฐ˜๋ณตํ•ฉ๋‹ˆ๋‹ค.

๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ณ„๋„๋กœ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋‹ค์‹œ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด, ์ž์ฒด์ ์ธ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค๋ฅผ ์ž‘์„ฑํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค.

1. ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค:

```py
>>> from diffusers import DDPMScheduler, UNet2DModel

>>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
>>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
```

2. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค๋ฅผ ์‹คํ–‰ํ•  timestep ์ˆ˜๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค:

```py
>>> scheduler.set_timesteps(50)
```

3. ์Šค์ผ€์ค„๋Ÿฌ์˜ timestep์„ ์„ค์ •ํ•˜๋ฉด ๊ท ๋“ฑํ•œ ๊ฐ„๊ฒฉ์˜ ๊ตฌ์„ฑ ์š”์†Œ๋ฅผ ๊ฐ€์ง„ ํ…์„œ๊ฐ€ ์ƒ์„ฑ๋ฉ๋‹ˆ๋‹ค(์ด ์˜ˆ์‹œ์—์„œ๋Š” 50๊ฐœ). ๊ฐ ์š”์†Œ๋Š” ๋ชจ๋ธ์ด ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ๋ฅผ ์ œ๊ฑฐํ•˜๋Š” ์‹œ๊ฐ„ ๊ฐ„๊ฒฉ์— ํ•ด๋‹นํ•ฉ๋‹ˆ๋‹ค.
๋‚˜์ค‘์— ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋ฅผ ๋งŒ๋“ค ๋•Œ ์ด ํ…์„œ๋ฅผ ๋ฐ˜๋ณตํ•˜์—ฌ ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ๋ฅผ ์ œ๊ฑฐํ•ฉ๋‹ˆ๋‹ค: ```py >>> scheduler.timesteps tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720, 700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440, 420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160, 140, 120, 100, 80, 60, 40, 20, 0]) ``` 4. ์›ํ•˜๋Š” ์ถœ๋ ฅ๊ณผ ๊ฐ™์€ ๋ชจ์–‘์„ ๊ฐ€์ง„ ๋žœ๋ค ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค: ```py >>> import torch >>> sample_size = model.config.sample_size >>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") ``` 5. ์ด์ œ timestep์„ ๋ฐ˜๋ณตํ•˜๋Š” ๋ฃจํ”„๋ฅผ ์ž‘์„ฑํ•ฉ๋‹ˆ๋‹ค. ๊ฐ timestep์—์„œ ๋ชจ๋ธ์€ [`UNet2DModel.forward`]๋ฅผ ํ†ตํ•ด noisy residual์„ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ์Šค์ผ€์ค„๋Ÿฌ์˜ [`~DDPMScheduler.step`] ๋ฉ”์„œ๋“œ๋Š” noisy residual, timestep, ๊ทธ๋ฆฌ๊ณ  ์ž…๋ ฅ์„ ๋ฐ›์•„ ์ด์ „ timestep์—์„œ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ์ด ์ถœ๋ ฅ์€ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„์˜ ๋ชจ๋ธ์— ๋Œ€ํ•œ ๋‹ค์Œ ์ž…๋ ฅ์ด ๋˜๋ฉฐ, `timesteps` ๋ฐฐ์—ด์˜ ๋์— ๋„๋‹ฌํ•  ๋•Œ๊นŒ์ง€ ๋ฐ˜๋ณต๋ฉ๋‹ˆ๋‹ค. ```py >>> input = noise >>> for t in scheduler.timesteps: ... with torch.no_grad(): ... noisy_residual = model(input, t).sample ... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample ... input = previous_noisy_sample ``` ์ด๊ฒƒ์ด ์ „์ฒด ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ํ”„๋กœ์„ธ์Šค์ด๋ฉฐ, ๋™์ผํ•œ ํŒจํ„ด์„ ์‚ฌ์šฉํ•ด ๋ชจ๋“  diffusion ์‹œ์Šคํ…œ์„ ์ž‘์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 6. ๋งˆ์ง€๋ง‰ ๋‹จ๊ณ„๋Š” ๋…ธ์ด์ฆˆ๊ฐ€ ์ œ๊ฑฐ๋œ ์ถœ๋ ฅ์„ ์ด๋ฏธ์ง€๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค: ```py >>> from PIL import Image >>> import numpy as np >>> image = (input / 2 + 0.5).clamp(0, 1) >>> image = image.cpu().permute(0, 2, 3, 1).numpy()[0] >>> image = Image.fromarray((image * 255).round().astype("uint8")) >>> image ``` ๋‹ค์Œ ์„น์…˜์—์„œ๋Š” ์—ฌ๋Ÿฌ๋ถ„์˜ ๊ธฐ์ˆ ์„ ์‹œํ—˜ํ•ด๋ณด๊ณ  ์ข€ ๋” ๋ณต์žกํ•œ Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถ„์„ํ•ด ๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ๋ฐฉ๋ฒ•์€ ๊ฑฐ์˜ ๋™์ผํ•ฉ๋‹ˆ๋‹ค. ํ•„์š”ํ•œ ๊ตฌ์„ฑ์š”์†Œ๋“ค์„ ์ดˆ๊ธฐํ™”ํ•˜๊ณ  timestep์ˆ˜๋ฅผ ์„ค์ •ํ•˜์—ฌ `timestep` ๋ฐฐ์—ด์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„์—์„œ `timestep` ๋ฐฐ์—ด์ด ์‚ฌ์šฉ๋˜๋ฉฐ, ์ด ๋ฐฐ์—ด์˜ ๊ฐ ์š”์†Œ์— ๋Œ€ํ•ด ๋ชจ๋ธ์€ ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋Š” `timestep`์„ ๋ฐ˜๋ณตํ•˜๊ณ  ๊ฐ timestep์—์„œ noise residual์„ ์ถœ๋ ฅํ•˜๊ณ  ์Šค์ผ€์ค„๋Ÿฌ๋Š” ์ด๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ด์ „ timestep์—์„œ ๋…ธ์ด์ฆˆ๊ฐ€ ๋œํ•œ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•ฉ๋‹ˆ๋‹ค. ์ด ํ”„๋กœ์„ธ์Šค๋Š” `timestep` ๋ฐฐ์—ด์˜ ๋์— ๋„๋‹ฌํ•  ๋•Œ๊นŒ์ง€ ๋ฐ˜๋ณต๋ฉ๋‹ˆ๋‹ค. ํ•œ๋ฒˆ ์‚ฌ์šฉํ•ด ๋ด…์‹œ๋‹ค! ## Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ ํ•ด์ฒดํ•˜๊ธฐ Stable Diffusion ์€ text-to-image *latent diffusion* ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค. latent diffusion ๋ชจ๋ธ์ด๋ผ๊ณ  ๋ถˆ๋ฆฌ๋Š” ์ด์œ ๋Š” ์‹ค์ œ ํ”ฝ์…€ ๊ณต๊ฐ„ ๋Œ€์‹  ์ด๋ฏธ์ง€์˜ ์ €์ฐจ์›์˜ ํ‘œํ˜„์œผ๋กœ ์ž‘์—…ํ•˜๊ธฐ ๋•Œ๋ฌธ์ด๊ณ , ๋ฉ”๋ชจ๋ฆฌ ํšจ์œจ์ด ๋” ๋†’์Šต๋‹ˆ๋‹ค. ์ธ์ฝ”๋”๋Š” ์ด๋ฏธ์ง€๋ฅผ ๋” ์ž‘์€ ํ‘œํ˜„์œผ๋กœ ์••์ถ•ํ•˜๊ณ , ๋””์ฝ”๋”๋Š” ์••์ถ•๋œ ํ‘œํ˜„์„ ๋‹ค์‹œ ์ด๋ฏธ์ง€๋กœ ๋ณ€ํ™˜ํ•ฉ๋‹ˆ๋‹ค. text-to-image ๋ชจ๋ธ์˜ ๊ฒฝ์šฐ ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ์„ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด tokenizer์™€ ์ธ์ฝ”๋”๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ์ด์ „ ์˜ˆ์ œ์—์„œ ์ด๋ฏธ UNet ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ํ•„์š”ํ•˜๋‹ค๋Š” ๊ฒƒ์€ ์•Œ๊ณ  ๊ณ„์…จ์„ ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋ณด์‹œ๋‹ค์‹œํ”ผ, ์ด๊ฒƒ์€ UNet ๋ชจ๋ธ๋งŒ ํฌํ•จ๋œ DDPM ํŒŒ์ดํ”„๋ผ์ธ๋ณด๋‹ค ๋” ๋ณต์žกํ•ฉ๋‹ˆ๋‹ค. Stable Diffusion ๋ชจ๋ธ์—๋Š” ์„ธ ๊ฐœ์˜ ๊ฐœ๋ณ„ ์‚ฌ์ „ํ•™์Šต๋œ ๋ชจ๋ธ์ด ์žˆ์Šต๋‹ˆ๋‹ค. 
<Tip>

๐Ÿ’ก VAE, UNet ๋ฐ ํ…์ŠคํŠธ ์ธ์ฝ”๋” ๋ชจ๋ธ์˜ ์ž‘๋™ ๋ฐฉ์‹์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) ๋ธ”๋กœ๊ทธ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

</Tip>

์ด์ œ Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์— ํ•„์š”ํ•œ ๊ตฌ์„ฑ์š”์†Œ๋“ค์ด ๋ฌด์—‡์ธ์ง€ ์•Œ์•˜์œผ๋‹ˆ, [`~ModelMixin.from_pretrained`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•ด ๋ชจ๋“  ๊ตฌ์„ฑ์š”์†Œ๋ฅผ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค. ๊ฐ ๊ตฌ์„ฑ์š”์†Œ๋Š” ์‚ฌ์ „ํ•™์Šต๋œ ์ฒดํฌํฌ์ธํŠธ [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์œผ๋ฉฐ, ๋ณ„๋„์˜ ํ•˜์œ„ ํด๋”์— ์ €์žฅ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค:

```py
>>> from PIL import Image
>>> import torch
>>> from transformers import CLIPTextModel, CLIPTokenizer
>>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler

>>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
>>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer")
>>> text_encoder = CLIPTextModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="text_encoder")
>>> unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
```

๊ธฐ๋ณธ [`PNDMScheduler`]๋ฅผ [`UniPCMultistepScheduler`]๋กœ ๊ต์ฒดํ•ด ๋ณด๋ฉด, ๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์–ผ๋งˆ๋‚˜ ์‰ฝ๊ฒŒ ์—ฐ๊ฒฐํ•  ์ˆ˜ ์žˆ๋Š”์ง€ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
>>> from diffusers import UniPCMultistepScheduler

>>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```

์ถ”๋ก  ์†๋„๋ฅผ ๋†’์ด๋ ค๋ฉด ๋ชจ๋ธ๋“ค์„ GPU๋กœ ์˜ฎ๊ธฐ์„ธ์š”. ์Šค์ผ€์ค„๋Ÿฌ์™€ ๋‹ฌ๋ฆฌ ๋ชจ๋ธ์€ ํ•™์Šต ๊ฐ€๋Šฅํ•œ ๊ฐ€์ค‘์น˜๋ฅผ ๊ฐ–๊ณ  ์žˆ๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค:

```py
>>> torch_device = "cuda"
>>> vae.to(torch_device)
>>> text_encoder.to(torch_device)
>>> unet.to(torch_device)
```

### ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ ์ƒ์„ฑํ•˜๊ธฐ

๋‹ค์Œ ๋‹จ๊ณ„๋Š” ์ž„๋ฒ ๋”ฉ์„ ์ƒ์„ฑํ•˜๊ธฐ ์œ„ํ•ด ํ…์ŠคํŠธ๋ฅผ ํ† ํฐํ™”ํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ด ํ…์ŠคํŠธ๋Š” UNet ๋ชจ๋ธ์—์„œ condition์œผ๋กœ ์‚ฌ์šฉ๋˜๊ณ , ์ž…๋ ฅ ํ”„๋กฌํ”„ํŠธ์™€ ์œ ์‚ฌํ•œ ๋ฐฉํ–ฅ์œผ๋กœ diffusion ํ”„๋กœ์„ธ์Šค๋ฅผ ์กฐ์ •ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค.

<Tip>

๐Ÿ’ก `guidance_scale` ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ๋•Œ ํ”„๋กฌํ”„ํŠธ์— ์–ผ๋งˆ๋‚˜ ๋งŽ์€ ๊ฐ€์ค‘์น˜๋ฅผ ๋ถ€์—ฌํ• ์ง€ ๊ฒฐ์ •ํ•ฉ๋‹ˆ๋‹ค.

</Tip>

๋‹ค๋ฅธ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด ์›ํ•˜๋Š” ํ”„๋กฌํ”„ํŠธ๋กœ ์ž์œ ๋กญ๊ฒŒ ๋ฐ”๊ฟ”๋ณด์„ธ์š”!

```py
>>> prompt = ["a photograph of an astronaut riding a horse"]
>>> height = 512  # Stable Diffusion์˜ ๊ธฐ๋ณธ ๋†’์ด
>>> width = 512  # Stable Diffusion์˜ ๊ธฐ๋ณธ ๋„ˆ๋น„
>>> num_inference_steps = 25  # ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ์Šคํ… ์ˆ˜
>>> guidance_scale = 7.5  # classifier-free guidance๋ฅผ ์œ„ํ•œ scale
>>> generator = torch.manual_seed(0)  # ์ดˆ๊ธฐ ์ž ์žฌ ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•˜๋Š” seed generator
>>> batch_size = len(prompt)
```

ํ…์ŠคํŠธ๋ฅผ ํ† ํฐํ™”ํ•˜๊ณ  ํ”„๋กฌํ”„ํŠธ์—์„œ ์ž„๋ฒ ๋”ฉ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค:

```py
>>> text_input = tokenizer(
...     prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
... )

>>> with torch.no_grad():
...     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
```

๋˜ํ•œ ํŒจ๋”ฉ ํ† ํฐ์˜ ์ž„๋ฒ ๋”ฉ์ธ *unconditional ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ*์„ ์ƒ์„ฑํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค.
์ด ์ž„๋ฒ ๋”ฉ์€ ์กฐ๊ฑด๋ถ€ `text_embeddings`๊ณผ ๋™์ผํ•œ shape(`batch_size` ๊ทธ๋ฆฌ๊ณ  `seq_length`)์„ ๊ฐ€์ ธ์•ผ ํ•ฉ๋‹ˆ๋‹ค: ```py >>> max_length = text_input.input_ids.shape[-1] >>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt") >>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] ``` ๋‘๋ฒˆ์˜ forward pass๋ฅผ ํ”ผํ•˜๊ธฐ ์œ„ํ•ด conditional ์ž„๋ฒ ๋”ฉ๊ณผ unconditional ์ž„๋ฒ ๋”ฉ์„ ๋ฐฐ์น˜(batch)๋กœ ์—ฐ๊ฒฐํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค: ```py >>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) ``` ### ๋žœ๋ค ๋…ธ์ด์ฆˆ ์ƒ์„ฑ ๊ทธ๋‹ค์Œ diffusion ํ”„๋กœ์„ธ์Šค์˜ ์‹œ์ž‘์ ์œผ๋กœ ์ดˆ๊ธฐ ๋žœ๋ค ๋…ธ์ด์ฆˆ๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ์ด๊ฒƒ์ด ์ด๋ฏธ์ง€์˜ ์ž ์žฌ์  ํ‘œํ˜„์ด๋ฉฐ ์ ์ฐจ์ ์œผ๋กœ ๋…ธ์ด์ฆˆ๊ฐ€ ์ œ๊ฑฐ๋ฉ๋‹ˆ๋‹ค. ์ด ์‹œ์ ์—์„œ `latent` ์ด๋ฏธ์ง€๋Š” ์ตœ์ข… ์ด๋ฏธ์ง€ ํฌ๊ธฐ๋ณด๋‹ค ์ž‘์ง€๋งŒ ๋‚˜์ค‘์— ๋ชจ๋ธ์ด ์ด๋ฅผ 512x512 ์ด๋ฏธ์ง€ ํฌ๊ธฐ๋กœ ๋ณ€ํ™˜ํ•˜๋ฏ€๋กœ ๊ดœ์ฐฎ์Šต๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก `vae` ๋ชจ๋ธ์—๋Š” 3๊ฐœ์˜ ๋‹ค์šด ์ƒ˜ํ”Œ๋ง ๋ ˆ์ด์–ด๊ฐ€ ์žˆ๊ธฐ ๋•Œ๋ฌธ์— ๋†’์ด์™€ ๋„ˆ๋น„๊ฐ€ 8๋กœ ๋‚˜๋‰ฉ๋‹ˆ๋‹ค. ๋‹ค์Œ์„ ์‹คํ–‰ํ•˜์—ฌ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py 2 ** (len(vae.config.block_out_channels) - 1) == 8 ``` </Tip> ```py >>> latents = torch.randn( ... (batch_size, unet.in_channels, height // 8, width // 8), ... generator=generator, ... device=torch_device, ... ) ``` ### ์ด๋ฏธ์ง€ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋จผ์ € [`UniPCMultistepScheduler`]์™€ ๊ฐ™์€ ํ–ฅ์ƒ๋œ ์Šค์ผ€์ค„๋Ÿฌ์— ํ•„์š”ํ•œ ๋…ธ์ด์ฆˆ ์Šค์ผ€์ผ ๊ฐ’์ธ ์ดˆ๊ธฐ ๋…ธ์ด์ฆˆ ๋ถ„ํฌ *sigma* ๋กœ ์ž…๋ ฅ์„ ์Šค์ผ€์ผ๋ง ํ•˜๋Š” ๊ฒƒ๋ถ€ํ„ฐ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค: ```py >>> latents = latents * scheduler.init_noise_sigma ``` ๋งˆ์ง€๋ง‰ ๋‹จ๊ณ„๋Š” `latent`์˜ ์ˆœ์ˆ˜ํ•œ ๋…ธ์ด์ฆˆ๋ฅผ ์ ์ง„์ ์œผ๋กœ ํ”„๋กฌํ”„ํŠธ์— ์„ค๋ช…๋œ ์ด๋ฏธ์ง€๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋Š” ์„ธ ๊ฐ€์ง€ ์ž‘์—…์„ ์ˆ˜ํ–‰ํ•ด์•ผ ํ•œ๋‹ค๋Š” ์ ์„ ๊ธฐ์–ตํ•˜์„ธ์š”: 1. ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ์ค‘์— ์‚ฌ์šฉํ•  ์Šค์ผ€์ค„๋Ÿฌ์˜ timesteps๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. 2. timestep์„ ๋”ฐ๋ผ ๋ฐ˜๋ณตํ•ฉ๋‹ˆ๋‹ค. 3. ๊ฐ timestep์—์„œ UNet ๋ชจ๋ธ์„ ํ˜ธ์ถœํ•˜์—ฌ noise residual์„ ์˜ˆ์ธกํ•˜๊ณ  ์Šค์ผ€์ค„๋Ÿฌ์— ์ „๋‹ฌํ•˜์—ฌ ์ด์ „ ๋…ธ์ด์ฆˆ ์ƒ˜ํ”Œ์„ ๊ณ„์‚ฐํ•ฉ๋‹ˆ๋‹ค. ```py >>> from tqdm.auto import tqdm >>> scheduler.set_timesteps(num_inference_steps) >>> for t in tqdm(scheduler.timesteps): ... # classifier-free guidance๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ๊ฒฝ์šฐ ๋‘๋ฒˆ์˜ forward pass๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š๋„๋ก latent๋ฅผ ํ™•์žฅ. ... latent_model_input = torch.cat([latents] * 2) ... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t) ... # noise residual ์˜ˆ์ธก ... with torch.no_grad(): ... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample ... # guidance ์ˆ˜ํ–‰ ... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) ... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) ... # ์ด์ „ ๋…ธ์ด์ฆˆ ์ƒ˜ํ”Œ์„ ๊ณ„์‚ฐ x_t -> x_t-1 ... latents = scheduler.step(noise_pred, t, latents).prev_sample ``` ### ์ด๋ฏธ์ง€ ๋””์ฝ”๋”ฉ ๋งˆ์ง€๋ง‰ ๋‹จ๊ณ„๋Š” `vae`๋ฅผ ์ด์šฉํ•˜์—ฌ ์ž ์žฌ ํ‘œํ˜„์„ ์ด๋ฏธ์ง€๋กœ ๋””์ฝ”๋”ฉํ•˜๊ณ  `sample`๊ณผ ํ•จ๊ป˜ ๋””์ฝ”๋”ฉ๋œ ์ถœ๋ ฅ์„ ์–ป๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค: ```py # latent๋ฅผ ์Šค์ผ€์ผ๋งํ•˜๊ณ  vae๋กœ ์ด๋ฏธ์ง€ ๋””์ฝ”๋”ฉ latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample ``` ๋งˆ์ง€๋ง‰์œผ๋กœ ์ด๋ฏธ์ง€๋ฅผ `PIL.Image`๋กœ ๋ณ€ํ™˜ํ•˜๋ฉด ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! 
```py >>> image = (image / 2 + 0.5).clamp(0, 1) >>> image = image.detach().cpu().permute(0, 2, 3, 1).numpy() >>> images = (image * 255).round().astype("uint8") >>> pil_images = [Image.fromarray(image) for image in images] >>> pil_images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/> </div> ## ๋‹ค์Œ ๋‹จ๊ณ„ ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ๋ถ€ํ„ฐ ๋ณต์žกํ•œ ํŒŒ์ดํ”„๋ผ์ธ๊นŒ์ง€, ์ž์‹ ๋งŒ์˜ diffusion ์‹œ์Šคํ…œ์„ ์ž‘์„ฑํ•˜๋Š” ๋ฐ ํ•„์š”ํ•œ ๊ฒƒ์€ ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋ฃจํ”„๋ฟ์ด๋ผ๋Š” ๊ฒƒ์„ ์•Œ ์ˆ˜ ์žˆ์—ˆ์Šต๋‹ˆ๋‹ค. ์ด ๋ฃจํ”„๋Š” ์Šค์ผ€์ค„๋Ÿฌ์˜ timesteps๋ฅผ ์„ค์ •ํ•˜๊ณ , ์ด๋ฅผ ๋ฐ˜๋ณตํ•˜๋ฉฐ, UNet ๋ชจ๋ธ์„ ํ˜ธ์ถœํ•˜์—ฌ noise residual์„ ์˜ˆ์ธกํ•˜๊ณ  ์Šค์ผ€์ค„๋Ÿฌ์— ์ „๋‹ฌํ•˜์—ฌ ์ด์ „ ๋…ธ์ด์ฆˆ ์ƒ˜ํ”Œ์„ ๊ณ„์‚ฐํ•˜๋Š” ๊ณผ์ •์„ ๋ฒˆ๊ฐˆ์•„ ๊ฐ€๋ฉฐ ์ˆ˜ํ–‰ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ด๊ฒƒ์ด ๋ฐ”๋กœ ๐Ÿงจ Diffusers๊ฐ€ ์„ค๊ณ„๋œ ๋ชฉ์ ์ž…๋‹ˆ๋‹ค: ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์‚ฌ์šฉํ•ด ์ž์‹ ๋งŒ์˜ diffusion ์‹œ์Šคํ…œ์„ ์ง๊ด€์ ์ด๊ณ  ์‰ฝ๊ฒŒ ์ž‘์„ฑํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•˜๊ธฐ ์œ„ํ•ด์„œ์ž…๋‹ˆ๋‹ค. ๋‹ค์Œ ๋‹จ๊ณ„๋ฅผ ์ž์œ ๋กญ๊ฒŒ ์ง„ํ–‰ํ•˜์„ธ์š”: * ๐Ÿงจ Diffusers์— [ํŒŒ์ดํ”„๋ผ์ธ ๊ตฌ์ถ• ๋ฐ ๊ธฐ์—ฌ](using-diffusers/#contribute_pipeline)ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์•Œ์•„๋ณด์„ธ์š”. ์—ฌ๋Ÿฌ๋ถ„์ด ์–ด๋–ค ์•„์ด๋””์–ด๋ฅผ ๋‚ด๋†“์„์ง€ ๊ธฐ๋Œ€๋ฉ๋‹ˆ๋‹ค! * ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์—์„œ [๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ](./api/pipelines/overview)์„ ์‚ดํŽด๋ณด๊ณ , ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ณ„๋„๋กœ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ฒ˜์Œ๋ถ€ํ„ฐ ํ•ด์ฒดํ•˜๊ณ  ๋นŒ๋“œํ•  ์ˆ˜ ์žˆ๋Š”์ง€ ํ™•์ธํ•ด ๋ณด์„ธ์š”.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/inpaint.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided ์ด๋ฏธ์ง€ ์ธํŽ˜์ธํŒ…(inpainting)

[[open-in-colab]]

[`StableDiffusionInpaintPipeline`]์€ ๋งˆ์Šคํฌ์™€ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ œ๊ณตํ•˜์—ฌ ์ด๋ฏธ์ง€์˜ ํŠน์ • ๋ถ€๋ถ„์„ ํŽธ์ง‘ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•ฉ๋‹ˆ๋‹ค. ์ด ๊ธฐ๋Šฅ์€ ์ธํŽ˜์ธํŒ… ์ž‘์—…์„ ์œ„ํ•ด ํŠน๋ณ„ํžˆ ํ›ˆ๋ จ๋œ [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting)๊ณผ ๊ฐ™์€ Stable Diffusion ๋ฒ„์ „์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.

๋จผ์ € [`StableDiffusionInpaintPipeline`] ์ธ์Šคํ„ด์Šค๋ฅผ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค:

```python
import PIL
import requests
import torch
from io import BytesIO

from diffusers import StableDiffusionInpaintPipeline

pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
)
pipeline = pipeline.to("cuda")
```

๋‚˜์ค‘์— ๊ต์ฒดํ•  ๊ฐ•์•„์ง€ ์ด๋ฏธ์ง€์™€ ๋งˆ์Šคํฌ๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜์„ธ์š”:

```python
def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
```

์ด์ œ ๋งˆ์Šคํฌ ์˜์—ญ์„ ๋‹ค๋ฅธ ๊ฒƒ์œผ๋กœ ๊ต์ฒดํ•˜๋ผ๋Š” ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋งŒ๋“ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```

`image` | `mask_image` | `prompt` | output |
:---:|:---:|:---:|---:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |

<Tip warning={true}>

์ด์ „์˜ ์‹คํ—˜์ ์ธ ์ธํŽ˜์ธํŒ… ๊ตฌํ˜„์—์„œ๋Š” ํ’ˆ์งˆ์ด ๋‚ฎ์€ ๋‹ค๋ฅธ ํ”„๋กœ์„ธ์Šค๋ฅผ ์‚ฌ์šฉํ–ˆ์Šต๋‹ˆ๋‹ค. ์ด์ „ ๋ฒ„์ „๊ณผ์˜ ํ˜ธํ™˜์„ฑ์„ ๋ณด์žฅํ•˜๊ธฐ ์œ„ํ•ด, ์ƒˆ ๋ชจ๋ธ์ด ํฌํ•จ๋˜์ง€ ์•Š์€ ์‚ฌ์ „ํ•™์Šต๋œ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋ฉด ์ด์ „ ์ธํŽ˜์ธํŒ… ๋ฐฉ๋ฒ•์ด ๊ณ„์† ์ ์šฉ๋ฉ๋‹ˆ๋‹ค.

</Tip>

์•„๋ž˜ Space์—์„œ ์ด๋ฏธ์ง€ ์ธํŽ˜์ธํŒ…์„ ์ง์ ‘ ํ•ด๋ณด์„ธ์š”!

<iframe src="https://runwayml-stable-diffusion-inpainting.hf.space" frameborder="0" width="850" height="500" ></iframe>
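์ฐธ๊ณ ๋กœ, ๋‹ค๋ฅธ Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ `num_inference_steps`, `guidance_scale` ๊ฐ™์€ ์ธ์ž๋กœ ๊ฒฐ๊ณผ๋ฅผ ์กฐ์ ˆํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์œ„์˜ `pipeline`, `init_image`, `mask_image`๋ฅผ ๊ทธ๋Œ€๋กœ ์žฌ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ด๋ฉฐ, ์ธ์ž ๊ฐ’๋“ค์€ ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```python
# ์žฌํ˜„ ๊ฐ€๋Šฅํ•œ ๊ฒฐ๊ณผ๋ฅผ ์œ„ํ•ด ์‹œ๋“œ๋ฅผ ๊ณ ์ •ํ•œ ์˜ˆ์‹œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค
generator = torch.Generator(device="cuda").manual_seed(0)

image = pipeline(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=50,  # ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ์Šคํ… ์ˆ˜ (์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ ๊ฐ’)
    guidance_scale=7.5,  # ํ”„๋กฌํ”„ํŠธ ๋ฐ˜์˜ ๊ฐ•๋„ (์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ ๊ฐ’)
    generator=generator,
).images[0]
```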
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/custom_pipeline_examples.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ

> **์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์ด ์ด์Šˆ](https://github.com/huggingface/diffusers/issues/841)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.**

**์ปค๋ฎค๋‹ˆํ‹ฐ** ์˜ˆ์ œ๋Š” ์ปค๋ฎค๋‹ˆํ‹ฐ์—์„œ ์ถ”๊ฐ€ํ•œ ์ถ”๋ก  ๋ฐ ํ›ˆ๋ จ ์˜ˆ์ œ๋กœ ๊ตฌ์„ฑ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์Œ ํ‘œ๋ฅผ ์ฐธ์กฐํ•˜์—ฌ ๋ชจ๋“  ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ œ์— ๋Œ€ํ•œ ๊ฐœ์š”๋ฅผ ํ™•์ธํ•˜์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. **์ฝ”๋“œ ์˜ˆ์ œ**๋ฅผ ํด๋ฆญํ•˜๋ฉด ๋ณต์‚ฌํ•˜์—ฌ ๋ถ™์—ฌ๋„ฃ๊ธฐํ•  ์ˆ˜ ์žˆ๋Š” ์ฝ”๋“œ ์˜ˆ์ œ๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์ด ์˜ˆ์ƒ๋Œ€๋กœ ์ž‘๋™ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ ์ด์Šˆ๋ฅผ ๊ฐœ์„คํ•˜๊ณ  ์ž‘์„ฑ์ž์—๊ฒŒ ํ•‘์„ ๋ณด๋‚ด์ฃผ์„ธ์š”.

| ์˜ˆ | ์„ค๋ช… | ์ฝ”๋“œ ์˜ˆ์ œ | ์ฝœ๋žฉ | ์ €์ž |
|:---|:---|:---|:---|---:|
| CLIP Guided Stable Diffusion | CLIP ๊ฐ€์ด๋“œ ๊ธฐ๋ฐ˜์˜ Stable Diffusion์œผ๋กœ ํ…์ŠคํŠธ์—์„œ ์ด๋ฏธ์ง€๋กœ ์ƒ์„ฑํ•˜๊ธฐ | [CLIP Guided Stable Diffusion](#clip-๊ฐ€์ด๋“œ-๊ธฐ๋ฐ˜์˜-stable-diffusion) | [![์ฝœ๋žฉ์—์„œ ์—ด๊ธฐ](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/CLIP_Guided_Stable_diffusion_with_diffusers.ipynb) | [Suraj Patil](https://github.com/patil-suraj/) |
| One Step U-Net (Dummy) | ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์–ด๋–ป๊ฒŒ ์‚ฌ์šฉํ•ด์•ผ ํ•˜๋Š”์ง€์— ๋Œ€ํ•œ ์˜ˆ์‹œ(https://github.com/huggingface/diffusers/issues/841 ์ฐธ๊ณ ) | [One Step U-Net](#one-step-unet) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
| Stable Diffusion Interpolation | ์„œ๋กœ ๋‹ค๋ฅธ ํ”„๋กฌํ”„ํŠธ/์‹œ๋“œ ๊ฐ„ Stable Diffusion์˜ latent space ๋ณด๊ฐ„ | [Stable Diffusion Interpolation](#stable-diffusion-interpolation) | - | [Nate Raw](https://github.com/nateraw/) |
| Stable Diffusion Mega | [Text2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), [Image2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) ๋ฐ [Inpainting](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py)์˜ ๋ชจ๋“  ๊ธฐ๋Šฅ์„ ๊ฐ–์ถ˜ **ํ•˜๋‚˜์˜** Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ | [Stable Diffusion Mega](#stable-diffusion-mega) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
| Long Prompt Weighting Stable Diffusion | ํ† ํฐ ๊ธธ์ด ์ œํ•œ์ด ์—†๊ณ  ํ”„๋กฌํ”„ํŠธ์˜ ํŒŒ์‹ฑ ๊ฐ€์ค‘์น˜๋ฅผ ์ง€์›ํ•˜๋Š” **ํ•˜๋‚˜์˜** Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ | [Long Prompt Weighting Stable Diffusion](#long-prompt-weighting-stable-diffusion) | - | [SkyTNT](https://github.com/SkyTNT) |
| Speech to Image | ์ž๋™ ์Œ์„ฑ ์ธ์‹์„ ์‚ฌ์šฉํ•˜์—ฌ ํ…์ŠคํŠธ๋ฅผ ์ž‘์„ฑํ•˜๊ณ  Stable Diffusion์„ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. | [Speech to Image](#speech-to-image) | - | [Mikail Duzenli](https://github.com/MikailINTech) |

์ปค์Šคํ…€ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋ ค๋ฉด `diffusers/examples/community`์— ์žˆ๋Š” ํŒŒ์ผ ์ค‘ ํ•˜๋‚˜๋ฅผ `custom_pipeline` ์ธ์ˆ˜๋กœ `DiffusionPipeline`์— ์ „๋‹ฌํ•˜๊ธฐ๋งŒ ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค. ์ž์‹ ๋งŒ์˜ ํŒŒ์ดํ”„๋ผ์ธ์ด ์žˆ๋Š” PR์„ ๋ณด๋‚ด์ฃผ์‹œ๋ฉด ๋น ๋ฅด๊ฒŒ ๋ณ‘ํ•ฉํ•ด๋“œ๋ฆฌ๊ฒ ์Šต๋‹ˆ๋‹ค.

```py
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="filename_in_the_community_folder"
)
```

## ์‚ฌ์šฉ ์˜ˆ์‹œ

### CLIP ๊ฐ€์ด๋“œ ๊ธฐ๋ฐ˜์˜ Stable Diffusion

๋ชจ๋“  ๋…ธ์ด์ฆˆ ์ œ๊ฑฐ ๋‹จ๊ณ„์—์„œ ์ถ”๊ฐ€ CLIP ๋ชจ๋ธ์„ ํ†ตํ•ด Stable Diffusion์„ ๊ฐ€์ด๋“œํ•จ์œผ๋กœ์จ, CLIP ๊ฐ€์ด๋“œ ๊ธฐ๋ฐ˜์˜ Stable Diffusion์€ ๋ณด๋‹ค ๋” ์‚ฌ์‹ค์ ์ธ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

๋‹ค์Œ ์ฝ”๋“œ๋Š” ์•ฝ 12GB์˜ GPU RAM์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค.

```python
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel
import torch


feature_extractor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)


guided_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
)
guided_pipeline.enable_attention_slicing()
guided_pipeline = guided_pipeline.to("cuda")

prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"

generator = torch.Generator(device="cuda").manual_seed(0)
images = []
for i in range(4):
    image = guided_pipeline(
        prompt,
        num_inference_steps=50,
        guidance_scale=7.5,
        clip_guidance_scale=100,
        num_cutouts=4,
        use_cutouts=False,
        generator=generator,
    ).images[0]
    images.append(image)

# ์ด๋ฏธ์ง€ ๋กœ์ปฌ์— ์ €์žฅํ•˜๊ธฐ
for i, img in enumerate(images):
    img.save(f"./clip_guided_sd/image_{i}.png")
```

`images` ๋ชฉ๋ก์—๋Š” ๋กœ์ปฌ์— ์ €์žฅํ•˜๊ฑฐ๋‚˜ ๊ตฌ๊ธ€ ์ฝœ๋žฉ์— ์ง์ ‘ ํ‘œ์‹œํ•  ์ˆ˜ ์žˆ๋Š” PIL ์ด๋ฏธ์ง€๋“ค์ด ๋‹ด๊ฒจ ์žˆ์Šต๋‹ˆ๋‹ค. ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋Š” ๊ธฐ๋ณธ Stable Diffusion์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ๋ณด๋‹ค ํ’ˆ์งˆ์ด ๋†’์€ ๊ฒฝํ–ฅ์ด ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด ์œ„์˜ ์Šคํฌ๋ฆฝํŠธ๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค:

![clip_guidance](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/clip_guidance/merged_clip_guidance.jpg)

### One Step Unet

์˜ˆ์‹œ "one-step-unet"๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
pipe()
```

**์ฐธ๊ณ **: ์ด ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์€ ๊ธฐ๋Šฅ์ ์œผ๋กœ ์œ ์šฉํ•˜์ง€ ์•Š์œผ๋ฉฐ, ์ปค๋ฎค๋‹ˆํ‹ฐ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ถ”๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์˜ ์˜ˆ์‹œ์ผ ๋ฟ์ž…๋‹ˆ๋‹ค(https://github.com/huggingface/diffusers/issues/841 ์ฐธ์กฐ).

### Stable Diffusion Interpolation

๋‹ค์Œ ์ฝ”๋“œ๋Š” ์ตœ์†Œ 8GB VRAM์˜ GPU์—์„œ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ ์•ฝ 5๋ถ„ ์ •๋„ ์†Œ์š”๋ฉ๋‹ˆ๋‹ค.

```python
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
    safety_checker=None,  # Very important for videos...lots of false positives while interpolating
    custom_pipeline="interpolate_stable_diffusion",
).to("cuda")
pipe.enable_attention_slicing()

frame_filepaths = pipe.walk(
    prompts=["a dog", "a cat", "a horse"],
    seeds=[42, 1337, 1234],
    num_interpolation_steps=16,
    output_dir="./dreams",
    batch_size=4,
    height=512,
    width=512,
    guidance_scale=8.5,
    num_inference_steps=50,
)
```

`walk(...)` ํ•จ์ˆ˜๋Š” `output_dir`์— ์ •์˜๋œ ํด๋”์— ์ €์žฅ๋œ ์ด๋ฏธ์ง€ ๋ชฉ๋ก์„ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ์ด ์ด๋ฏธ์ง€๋“ค์„ ์‚ฌ์šฉํ•˜์—ฌ Stable Diffusion ๋™์˜์ƒ์„ ๋งŒ๋“ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

> Stable Diffusion์„ ์ด์šฉํ•œ ๋™์˜์ƒ ์ œ์ž‘ ๋ฐฉ๋ฒ•๊ณผ ๋” ๋งŽ์€ ๊ธฐ๋Šฅ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ https://github.com/nateraw/stable-diffusion-videos ์—์„œ ํ™•์ธํ•˜์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค.

### Stable Diffusion Mega

Stable Diffusion Mega ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜๋ฉด Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์˜ ์ฃผ์š” ์‚ฌ์šฉ ์‚ฌ๋ก€๋ฅผ ๋‹จ์ผ ํด๋ž˜์Šค์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
#!/usr/bin/env python3
from diffusers import DiffusionPipeline
import PIL
import requests
from io import BytesIO
import torch


def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_mega",
    torch_dtype=torch.float16,
)
pipe.to("cuda")
pipe.enable_attention_slicing()


### Text-to-Image

images = pipe.text2img("An astronaut riding a horse").images

### Image-to-Image

init_image = download_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)

prompt = "A fantasy landscape, trending on artstation"

images = pipe.img2img(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images

### Inpainting

img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))

prompt = "a cat sitting on a bench"
images = pipe.inpaint(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75).images
```

์œ„์— ํ‘œ์‹œ๋œ ๊ฒƒ์ฒ˜๋Ÿผ ํ•˜๋‚˜์˜ ํŒŒ์ดํ”„๋ผ์ธ์—์„œ 'ํ…์ŠคํŠธ-์ด๋ฏธ์ง€ ๋ณ€ํ™˜', '์ด๋ฏธ์ง€-์ด๋ฏธ์ง€ ๋ณ€ํ™˜', '์ธํŽ˜์ธํŒ…'์„ ๋ชจ๋‘ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

### Long Prompt Weighting Stable Diffusion

์ด ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜๋ฉด 77๊ฐœ์˜ ํ† ํฐ ๊ธธ์ด ์ œํ•œ ์—†์ด ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ž…๋ ฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
๋˜ํ•œ "()"๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋‹จ์–ด ๊ฐ€์ค‘์น˜๋ฅผ ๋†’์ด๊ฑฐ๋‚˜ "[]"๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋‹จ์–ด ๊ฐ€์ค‘์น˜๋ฅผ ๋‚ฎ์ถœ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜๋ฉด ๋‹จ์ผ ํด๋ž˜์Šค์—์„œ Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ์˜ ์ฃผ์š” ์‚ฌ์šฉ ์‚ฌ๋ก€๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. #### pytorch ```python from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "hakurei/waifu-diffusion", custom_pipeline="lpw_stable_diffusion", torch_dtype=torch.float16 ) pipe = pipe.to("cuda") prompt = "best_quality (1girl:1.3) bow bride brown_hair closed_mouth frilled_bow frilled_hair_tubes frills (full_body:1.3) fox_ear hair_bow hair_tubes happy hood japanese_clothes kimono long_sleeves red_bow smile solo tabi uchikake white_kimono wide_sleeves cherry_blossoms" neg_prompt = "lowres, bad_anatomy, error_body, error_hair, error_arm, error_hands, bad_hands, error_fingers, bad_fingers, missing_fingers, error_legs, bad_legs, multiple_legs, missing_legs, error_lighting, error_shadow, error_reflection, text, error, extra_digit, fewer_digits, cropped, worst_quality, low_quality, normal_quality, jpeg_artifacts, signature, watermark, username, blurry" pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0] ``` #### onnxruntime ```python from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", custom_pipeline="lpw_stable_diffusion_onnx", revision="onnx", provider="CUDAExecutionProvider", ) prompt = "a photo of an astronaut riding a horse on mars, best quality" neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0] ``` ํ† ํฐ ์ธ๋ฑ์Šค ์‹œํ€€์Šค ๊ธธ์ด๊ฐ€ ์ด ๋ชจ๋ธ์— ์ง€์ •๋œ ์ตœ๋Œ€ ์‹œํ€€์Šค ๊ธธ์ด๋ณด๋‹ค ๊ธธ๋ฉด(*** > 77). ์ด ์‹œํ€€์Šค๋ฅผ ๋ชจ๋ธ์—์„œ ์‹คํ–‰ํ•˜๋ฉด ์ธ๋ฑ์‹ฑ ์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ•ฉ๋‹ˆ๋‹ค`. ์ •์ƒ์ ์ธ ํ˜„์ƒ์ด๋‹ˆ ๊ฑฑ์ •ํ•˜์ง€ ๋งˆ์„ธ์š”. ### Speech to Image ๋‹ค์Œ ์ฝ”๋“œ๋Š” ์‚ฌ์ „ํ•™์Šต๋œ OpenAI whisper-small๊ณผ Stable Diffusion์„ ์‚ฌ์šฉํ•˜์—ฌ ์˜ค๋””์˜ค ์ƒ˜ํ”Œ์—์„œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
```Python
import torch

import matplotlib.pyplot as plt
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import (
    WhisperForConditionalGeneration,
    WhisperProcessor,
)


device = "cuda" if torch.cuda.is_available() else "cpu"

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

audio_sample = ds[3]

text = audio_sample["text"].lower()
speech_data = audio_sample["audio"]["array"]

model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")

diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=model,
    speech_processor=processor,
    torch_dtype=torch.float16,
)

diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)

output = diffuser_pipeline(speech_data)
plt.imshow(output.images[0])
```

์œ„ ์˜ˆ์‹œ๋ฅผ ์‹คํ–‰ํ•˜๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๊ฒฐ๊ณผ ์ด๋ฏธ์ง€๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

![image](https://user-images.githubusercontent.com/45072645/196901736-77d9c6fc-63ee-4072-90b0-dc8b903d63e3.png)
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/schedulers.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ์Šค์ผ€์ค„๋Ÿฌ

diffusion ํŒŒ์ดํ”„๋ผ์ธ์€ diffusion ๋ชจ๋ธ, ์Šค์ผ€์ค„๋Ÿฌ ๋“ฑ์˜ ์ปดํฌ๋„ŒํŠธ๋“ค๋กœ ๊ตฌ์„ฑ๋ฉ๋‹ˆ๋‹ค. ๊ทธ๋ฆฌ๊ณ  ํŒŒ์ดํ”„๋ผ์ธ ์•ˆ์˜ ์ผ๋ถ€ ์ปดํฌ๋„ŒํŠธ๋ฅผ ๋‹ค๋ฅธ ์ปดํฌ๋„ŒํŠธ๋กœ ๊ต์ฒดํ•˜๋Š” ์‹์˜ ์ปค์Šคํ„ฐ๋งˆ์ด์ง• ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ์ด์™€ ๊ฐ™์€ ์ปดํฌ๋„ŒํŠธ ์ปค์Šคํ„ฐ๋งˆ์ด์ง•์˜ ๊ฐ€์žฅ ๋Œ€ํ‘œ์ ์ธ ์˜ˆ์‹œ๊ฐ€ ๋ฐ”๋กœ [์Šค์ผ€์ค„๋Ÿฌ](../api/schedulers/overview.md)๋ฅผ ๊ต์ฒดํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค.

์Šค์ผ€์ค„๋Ÿฌ๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด diffusion ์‹œ์Šคํ…œ์˜ ์ „๋ฐ˜์ ์ธ ๋””๋…ธ์ด์ง• ํ”„๋กœ์„ธ์Šค๋ฅผ ์ •์˜ํ•ฉ๋‹ˆ๋‹ค.

- ๋””๋…ธ์ด์ง• ์Šคํ…์„ ์–ผ๋งˆ๋‚˜ ๊ฐ€์ ธ๊ฐ€์•ผ ํ• ๊นŒ?
- ํ™•๋ฅ ์ ์œผ๋กœ(stochastic) ํ˜น์€ ํ™•์ •์ ์œผ๋กœ(deterministic)?
- ๋””๋…ธ์ด์ง•๋œ ์ƒ˜ํ”Œ์„ ์ฐพ์•„๋‚ด๊ธฐ ์œ„ํ•ด ์–ด๋–ค ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•ด์•ผ ํ• ๊นŒ?

์ด๋Ÿฌํ•œ ํ”„๋กœ์„ธ์Šค๋Š” ๋‹ค์†Œ ๋‚œํ•ดํ•˜๊ณ , ๋””๋…ธ์ด์ง• ์†๋„์™€ ๋””๋…ธ์ด์ง• ํ€„๋ฆฌํ‹ฐ ์‚ฌ์ด์˜ ํŠธ๋ ˆ์ด๋“œ์˜คํ”„๋ฅผ ์ •์˜ํ•ด์•ผ ํ•˜๋Š” ๋ฌธ์ œ๊ฐ€ ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ฃผ์–ด์ง„ ํŒŒ์ดํ”„๋ผ์ธ์— ์–ด๋–ค ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ๊ฐ€์žฅ ์ ํ•ฉํ•œ์ง€๋ฅผ ์ •๋Ÿ‰์ ์œผ๋กœ ํŒ๋‹จํ•˜๋Š” ๊ฒƒ์€ ๋งค์šฐ ์–ด๋ ค์šด ์ผ์ž…๋‹ˆ๋‹ค. ์ด๋กœ ์ธํ•ด ์ผ๋‹จ ํ•ด๋‹น ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์ง์ ‘ ์‚ฌ์šฉํ•˜์—ฌ, ์ƒ์„ฑ๋˜๋Š” ์ด๋ฏธ์ง€๋ฅผ ์ง์ ‘ ๋ˆˆ์œผ๋กœ ๋ณด๋ฉฐ, ์ •์„ฑ์ ์œผ๋กœ ์„ฑ๋Šฅ์„ ํŒ๋‹จํ•ด๋ณด๋Š” ๊ฒƒ์ด ์ถ”์ฒœ๋˜๊ณค ํ•ฉ๋‹ˆ๋‹ค.

## ํŒŒ์ดํ”„๋ผ์ธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ

๋จผ์ € ์Šคํ…Œ์ด๋ธ” diffusion ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋„๋ก ํ•ด๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ๋ฌผ๋ก  ์Šคํ…Œ์ด๋ธ” diffusion์„ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด์„œ๋Š”, ํ—ˆ๊น…ํŽ˜์ด์Šค ํ—ˆ๋ธŒ์— ๋“ฑ๋ก๋œ ์‚ฌ์šฉ์ž์—ฌ์•ผ ํ•˜๋ฉฐ, ๊ด€๋ จ [๋ผ์ด์„ผ์Šค](https://huggingface.co/runwayml/stable-diffusion-v1-5)์— ๋™์˜ํ•ด์•ผ ํ•œ๋‹ค๋Š” ์ ์„ ์žŠ์ง€ ๋ง์•„์ฃผ์„ธ์š”.

*์—ญ์ž ์ฃผ: ๋‹ค๋งŒ, ํ˜„์žฌ ์‹ ๊ทœ๋กœ ์ƒ์„ฑํ•œ ํ—ˆ๊น…ํŽ˜์ด์Šค ๊ณ„์ •์— ๋Œ€ํ•ด์„œ๋Š” ๋ผ์ด์„ผ์Šค ๋™์˜๋ฅผ ์š”๊ตฌํ•˜์ง€ ์•Š๋Š” ๊ฒƒ์œผ๋กœ ๋ณด์ž…๋‹ˆ๋‹ค!*

```python
from huggingface_hub import login
from diffusers import DiffusionPipeline
import torch

# first we need to login with our access token
login()

# Now we can download the pipeline
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```

๋‹ค์Œ์œผ๋กœ, GPU๋กœ ์ด๋™ํ•ฉ๋‹ˆ๋‹ค.

```python
pipeline.to("cuda")
```

## ์Šค์ผ€์ค„๋Ÿฌ ์•ก์„ธ์Šค

์Šค์ผ€์ค„๋Ÿฌ๋Š” ์–ธ์ œ๋‚˜ ํŒŒ์ดํ”„๋ผ์ธ์˜ ์ปดํฌ๋„ŒํŠธ๋กœ์„œ ์กด์žฌํ•˜๋ฉฐ, ์ผ๋ฐ˜์ ์œผ๋กœ ํŒŒ์ดํ”„๋ผ์ธ ์ธ์Šคํ„ด์Šค ๋‚ด์— `scheduler`๋ผ๋Š” ์ด๋ฆ„์˜ ์†์„ฑ(property)์œผ๋กœ ์ •์˜๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค.

```python
pipeline.scheduler
```

**Output**:
```
PNDMScheduler {
  "_class_name": "PNDMScheduler",
  "_diffusers_version": "0.8.0.dev0",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "clip_sample": false,
  "num_train_timesteps": 1000,
  "set_alpha_to_one": false,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "trained_betas": null
}
```

์ถœ๋ ฅ ๊ฒฐ๊ณผ๋ฅผ ํ†ตํ•ด, ์šฐ๋ฆฌ๋Š” ํ•ด๋‹น ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ [`PNDMScheduler`]์˜ ์ธ์Šคํ„ด์Šค๋ผ๋Š” ๊ฒƒ์„ ์•Œ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
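์Šคํฌ๋ฆฝํŠธ ์•ˆ์—์„œ ํ˜„์žฌ ์Šค์ผ€์ค„๋Ÿฌ์˜ ์ข…๋ฅ˜๋ฅผ ์ฝ”๋“œ๋กœ ํ™•์ธํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, ๋‹ค์Œ๊ณผ ๊ฐ™์ด ๊ฐ„๋‹จํžˆ ๊ฒ€์‚ฌํ•ด ๋ณผ ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค(์œ„์—์„œ ๋ถˆ๋Ÿฌ์˜จ `pipeline` ๊ฐ์ฒด๋ฅผ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•œ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค):

```python
from diffusers import PNDMScheduler

# ํ˜„์žฌ ์Šค์ผ€์ค„๋Ÿฌ์˜ ํด๋ž˜์Šค ์ด๋ฆ„์„ ์ถœ๋ ฅํ•˜๊ณ , PNDMScheduler์˜ ์ธ์Šคํ„ด์Šค์ธ์ง€ ํ™•์ธํ•ฉ๋‹ˆ๋‹ค
print(type(pipeline.scheduler).__name__)  # "PNDMScheduler"
assert isinstance(pipeline.scheduler, PNDMScheduler)
```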
์ด์ œ [`PNDMScheduler`]์™€ ๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์˜ ์„ฑ๋Šฅ์„ ๋น„๊ตํ•ด๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. ๋จผ์ € ํ…Œ์ŠคํŠธ์— ์‚ฌ์šฉํ•  ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜ํ•ด๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. ```python prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition." ``` ๋‹ค์Œ์œผ๋กœ ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€ ์ƒ์„ฑ์„ ๋ณด์žฅํ•˜๊ธฐ ์œ„ํ•ด์„œ, ๋‹ค์Œ๊ณผ ๊ฐ™์ด ๋žœ๋ค์‹œ๋“œ๋ฅผ ๊ณ ์ •ํ•ด์ฃผ๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. ```python generator = torch.Generator(device="cuda").manual_seed(8) image = pipeline(prompt, generator=generator).images[0] image ``` <p align="center"> <br> <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_pndm.png" width="400"/> <br> </p> ## ์Šค์ผ€์ค„๋Ÿฌ ๊ต์ฒดํ•˜๊ธฐ ๋‹ค์Œ์œผ๋กœ ํŒŒ์ดํ”„๋ผ์ธ์˜ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ๋กœ ๊ต์ฒดํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•ด ์•Œ์•„๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ๋ชจ๋“  ์Šค์ผ€์ค„๋Ÿฌ๋Š” [`SchedulerMixin.compatibles`]๋ผ๋Š” ์†์„ฑ(property)์„ ๊ฐ–๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ์†์„ฑ์€ **ํ˜ธํ™˜ ๊ฐ€๋Šฅํ•œ** ์Šค์ผ€์ค„๋Ÿฌ๋“ค์— ๋Œ€ํ•œ ์ •๋ณด๋ฅผ ๋‹ด๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ```python pipeline.scheduler.compatibles ``` **Output**: ``` [diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, diffusers.schedulers.scheduling_ddim.DDIMScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, diffusers.schedulers.scheduling_pndm.PNDMScheduler, diffusers.schedulers.scheduling_ddpm.DDPMScheduler, diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler] ``` ํ˜ธํ™˜๋˜๋Š” ์Šค์ผ€์ค„๋Ÿฌ๋“ค์„ ์‚ดํŽด๋ณด๋ฉด ์•„๋ž˜์™€ ๊ฐ™์Šต๋‹ˆ๋‹ค. - [`LMSDiscreteScheduler`], - [`DDIMScheduler`], - [`DPMSolverMultistepScheduler`], - [`EulerDiscreteScheduler`], - [`PNDMScheduler`], - [`DDPMScheduler`], - [`EulerAncestralDiscreteScheduler`]. ์•ž์„œ ์ •์˜ํ–ˆ๋˜ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์‚ฌ์šฉํ•ด์„œ ๊ฐ๊ฐ์˜ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์„ ๋น„๊ตํ•ด๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. ๋จผ์ € ํŒŒ์ดํ”„๋ผ์ธ ์•ˆ์˜ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ฐ”๊พธ๊ธฐ ์œ„ํ•ด [`ConfigMixin.config`] ์†์„ฑ๊ณผ [`ConfigMixin.from_config`] ๋ฉ”์„œ๋“œ๋ฅผ ํ™œ์šฉํ•ด๋ณด๋ ค๊ณ  ํ•ฉ๋‹ˆ๋‹ค. ```python pipeline.scheduler.config ``` **Output**: ``` FrozenDict([('num_train_timesteps', 1000), ('beta_start', 0.00085), ('beta_end', 0.012), ('beta_schedule', 'scaled_linear'), ('trained_betas', None), ('skip_prk_steps', True), ('set_alpha_to_one', False), ('steps_offset', 1), ('_class_name', 'PNDMScheduler'), ('_diffusers_version', '0.8.0.dev0'), ('clip_sample', False)]) ``` ๊ธฐ์กด ์Šค์ผ€์ค„๋Ÿฌ์˜ config๋ฅผ ํ˜ธํ™˜ ๊ฐ€๋Šฅํ•œ ๋‹ค๋ฅธ ์Šค์ผ€์ค„๋Ÿฌ์— ์ด์‹ํ•˜๋Š” ๊ฒƒ ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ๋‹ค์Œ ์˜ˆ์‹œ๋Š” ๊ธฐ์กด ์Šค์ผ€์ค„๋Ÿฌ(`pipeline.scheduler`)๋ฅผ ๋‹ค๋ฅธ ์ข…๋ฅ˜์˜ ์Šค์ผ€์ค„๋Ÿฌ(`DDIMScheduler`)๋กœ ๋ฐ”๊พธ๋Š” ์ฝ”๋“œ์ž…๋‹ˆ๋‹ค. ๊ธฐ์กด ์Šค์ผ€์ค„๋Ÿฌ๊ฐ€ ๊ฐ–๊ณ  ์žˆ๋˜ config๋ฅผ `.from_config` ๋ฉ”์„œ๋“œ์˜ ์ธ์ž๋กœ ์ „๋‹ฌํ•˜๋Š” ๊ฒƒ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```python from diffusers import DDIMScheduler pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) ``` ์ด์ œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•ด์„œ ๋‘ ์Šค์ผ€์ค„๋Ÿฌ ์‚ฌ์ด์˜ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€์˜ ํ€„๋ฆฌํ‹ฐ๋ฅผ ๋น„๊ตํ•ด๋ด…์‹œ๋‹ค. 
```python
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]
image
```

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_ddim.png" width="400"/>
    <br>
</p>

## ์Šค์ผ€์ค„๋Ÿฌ๋“ค ๋น„๊ตํ•ด๋ณด๊ธฐ

์ง€๊ธˆ๊นŒ์ง€๋Š” [`PNDMScheduler`]์™€ [`DDIMScheduler`] ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์‹คํ–‰ํ•ด๋ณด์•˜์Šต๋‹ˆ๋‹ค. ์•„์ง ๋น„๊ตํ•ด๋ณผ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์ด ๋” ๋งŽ์ด ๋‚จ์•„์žˆ์œผ๋‹ˆ ๊ณ„์† ๋น„๊ตํ•ด๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค.

[`LMSDiscreteScheduler`]๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ ๋” ์ข‹์€ ๊ฒฐ๊ณผ๋ฅผ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค.

```python
from diffusers import LMSDiscreteScheduler

pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]
image
```

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_lms.png" width="400"/>
    <br>
</p>

[`EulerDiscreteScheduler`]์™€ [`EulerAncestralDiscreteScheduler`]๋Š” ๊ณ ์ž‘ 30๋ฒˆ์˜ inference step๋งŒ์œผ๋กœ๋„ ๋†’์€ ํ€„๋ฆฌํ‹ฐ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.

```python
from diffusers import EulerDiscreteScheduler

pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
image
```

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_discrete.png" width="400"/>
    <br>
</p>

```python
from diffusers import EulerAncestralDiscreteScheduler

pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
image
```

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_ancestral.png" width="400"/>
    <br>
</p>

์ง€๊ธˆ ์ด ๋ฌธ์„œ๋ฅผ ์ž‘์„ฑํ•˜๋Š” ํ˜„์‹œ์  ๊ธฐ์ค€์œผ๋กœ๋Š”, [`DPMSolverMultistepScheduler`]๊ฐ€ ์‹œ๊ฐ„ ๋Œ€๋น„ ๊ฐ€์žฅ ์ข‹์€ ํ’ˆ์งˆ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค. 20๋ฒˆ ์ •๋„์˜ ์Šคํ…๋งŒ์œผ๋กœ๋„ ์‹คํ–‰๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
from diffusers import DPMSolverMultistepScheduler

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```

<p align="center">
    <br>
    <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_dpm.png" width="400"/>
    <br>
</p>

๋ณด์‹œ๋‹ค์‹œํ”ผ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋“ค์€ ๋งค์šฐ ๋น„์Šทํ•˜๊ณ , ๋น„์Šทํ•œ ํ€„๋ฆฌํ‹ฐ๋ฅผ ๋ณด์ด๋Š” ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค. ์‹ค์ œ๋กœ ์–ด๋–ค ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์„ ํƒํ•  ๊ฒƒ์ธ๊ฐ€๋Š” ์ข…์ข… ํŠน์ • ์ด์šฉ ์‚ฌ๋ก€์— ๊ธฐ๋ฐ˜ํ•ด์„œ ๊ฒฐ์ •๋˜๊ณค ํ•ฉ๋‹ˆ๋‹ค. ๊ฒฐ๊ตญ ์—ฌ๋Ÿฌ ์ข…๋ฅ˜์˜ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ์ง์ ‘ ์‹คํ–‰์‹œ์ผœ๋ณด๊ณ  ๋ˆˆ์œผ๋กœ ์ง์ ‘ ๋น„๊ตํ•ด์„œ ํŒ๋‹จํ•˜๋Š” ๊ฒŒ ์ข‹์€ ์„ ํƒ์ผ ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค.

## Flax์—์„œ ์Šค์ผ€์ค„๋Ÿฌ ๊ต์ฒดํ•˜๊ธฐ

JAX/Flax ์‚ฌ์šฉ์ž์ธ ๊ฒฝ์šฐ ๊ธฐ๋ณธ ํŒŒ์ดํ”„๋ผ์ธ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ณ€๊ฒฝํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค.
๋‹ค์Œ์€ Flax Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ์ดˆ๊ณ ์† [DDPM-Solver++ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ](../api/schedulers/multistep_dpm_solver) ์‚ฌ์šฉํ•˜์—ฌ ์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค . ```Python import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler model_id = "runwayml/stable-diffusion-v1-5" scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained( model_id, subfolder="scheduler" ) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, revision="bf16", dtype=jax.numpy.bfloat16, ) params["scheduler"] = scheduler_state # Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8) prompt = "a photo of an astronaut riding a horse on mars" num_samples = jax.device_count() prompt_ids = pipeline.prepare_inputs([prompt] * num_samples) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 25 # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) ``` <Tip warning={true}> ๋‹ค์Œ Flax ์Šค์ผ€์ค„๋Ÿฌ๋Š” *์•„์ง* Flax Stable Diffusion ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ํ˜ธํ™˜๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. - `FlaxLMSDiscreteScheduler` - `FlaxDDPMScheduler` </Tip>
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/using_safetensors.md
# ์„ธ์ดํ”„ํ…์„œ ๋กœ๋“œ [safetensors](https://github.com/huggingface/safetensors)๋Š” ํ…์„œ๋ฅผ ์ €์žฅํ•˜๊ณ  ๋กœ๋“œํ•˜๊ธฐ ์œ„ํ•œ ์•ˆ์ „ํ•˜๊ณ  ๋น ๋ฅธ ํŒŒ์ผ ํ˜•์‹์ž…๋‹ˆ๋‹ค. ์ผ๋ฐ˜์ ์œผ๋กœ PyTorch ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๋Š” Python์˜ [`pickle`](https://docs.python.org/3/library/pickle.html) ์œ ํ‹ธ๋ฆฌํ‹ฐ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ `.bin` ํŒŒ์ผ์— ์ €์žฅ๋˜๊ฑฐ๋‚˜ `ํ”ผํด`๋ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ `ํ”ผํด`์€ ์•ˆ์ „ํ•˜์ง€ ์•Š์œผ๋ฉฐ ํ”ผํด๋œ ํŒŒ์ผ์—๋Š” ์‹คํ–‰๋  ์ˆ˜ ์žˆ๋Š” ์•…์„ฑ ์ฝ”๋“œ๊ฐ€ ํฌํ•จ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์„ธ์ดํ”„ํ…์„œ๋Š” `ํ”ผํด`์˜ ์•ˆ์ „ํ•œ ๋Œ€์•ˆ์œผ๋กœ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๋ฅผ ๊ณต์œ ํ•˜๋Š” ๋ฐ ์ด์ƒ์ ์ž…๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” `.safetensor` ํŒŒ์ผ์„ ๋กœ๋“œํ•˜๋Š” ๋ฐฉ๋ฒ•๊ณผ ๋‹ค๋ฅธ ํ˜•์‹์œผ๋กœ ์ €์žฅ๋œ ์•ˆ์ •์  ํ™•์‚ฐ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๋ฅผ `.safetensor`๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ๋“œ๋ฆฌ๊ฒ ์Šต๋‹ˆ๋‹ค. ์‹œ์ž‘ํ•˜๊ธฐ ์ „์— ์„ธ์ดํ”„ํ…์„œ๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”: ```bash !pip install safetensors ``` ['runwayml/stable-diffusion-v1-5`] (https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋ฅผ ๋ณด๋ฉด `text_encoder`, `unet` ๋ฐ `vae` ํ•˜์œ„ ํด๋”์— ๊ฐ€์ค‘์น˜๊ฐ€ `.safetensors` ํ˜•์‹์œผ๋กœ ์ €์žฅ๋˜์–ด ์žˆ๋Š” ๊ฒƒ์„ ๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ธฐ๋ณธ์ ์œผ๋กœ ๐Ÿค— ๋””ํ“จ์ €๋Š” ๋ชจ๋ธ ์ €์žฅ์†Œ์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ ํ•ด๋‹น ํ•˜์œ„ ํด๋”์—์„œ ์ด๋Ÿฌํ•œ '.safetensors` ํŒŒ์ผ์„ ์ž๋™์œผ๋กœ ๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค. ๋ณด๋‹ค ๋ช…์‹œ์ ์ธ ์ œ์–ด๋ฅผ ์œ„ํ•ด ์„ ํƒ์ ์œผ๋กœ `์‚ฌ์šฉ_์„ธ์ดํ”„ํ…์„œ=True`๋ฅผ ์„ค์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค(`์„ธ์ดํ”„ํ…์„œ`๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ ์„ค์น˜ํ•˜๋ผ๋Š” ์˜ค๋ฅ˜ ๋ฉ”์‹œ์ง€๊ฐ€ ํ‘œ์‹œ๋จ): ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` ๊ทธ๋Ÿฌ๋‚˜ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๊ฐ€ ์œ„์˜ ์˜ˆ์‹œ์ฒ˜๋Ÿผ ๋ฐ˜๋“œ์‹œ ๋ณ„๋„์˜ ํ•˜์œ„ ํด๋”์— ์ €์žฅ๋˜๋Š” ๊ฒƒ์€ ์•„๋‹™๋‹ˆ๋‹ค. ๋ชจ๋“  ๊ฐ€์ค‘์น˜๊ฐ€ ํ•˜๋‚˜์˜ '.safetensors` ํŒŒ์ผ์— ์ €์žฅ๋˜๋Š” ๊ฒฝ์šฐ๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ๊ฒฝ์šฐ ๊ฐ€์ค‘์น˜๊ฐ€ Stable Diffusion ๊ฐ€์ค‘์น˜์ธ ๊ฒฝ์šฐ [`~diffusers.loaders.FromCkptMixin.from_ckpt`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ผ์„ ์ง์ ‘ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_ckpt( "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" ) ``` ## ์„ธ์ดํ”„ํ…์„œ๋กœ ๋ณ€ํ™˜ ํ—ˆ๋ธŒ์˜ ๋ชจ๋“  ๊ฐ€์ค‘์น˜๋ฅผ '.safetensors` ํ˜•์‹์œผ๋กœ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒƒ์€ ์•„๋‹ˆ๋ฉฐ, '.bin`์œผ๋กœ ์ €์žฅ๋œ ๊ฐ€์ค‘์น˜๊ฐ€ ์žˆ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ๊ฒฝ์šฐ [Convert Space](https://huggingface.co/spaces/diffusers/convert)์„ ์‚ฌ์šฉํ•˜์—ฌ ๊ฐ€์ค‘์น˜๋ฅผ '.safetensors'๋กœ ๋ณ€ํ™˜ํ•˜์„ธ์š”. Convert Space๋Š” ํ”ผํด๋œ ๊ฐ€์ค‘์น˜๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜์—ฌ ๋ณ€ํ™˜ํ•œ ํ›„ ํ’€ ๋ฆฌํ€˜์ŠคํŠธ๋ฅผ ์—ด์–ด ํ—ˆ๋ธŒ์— ์ƒˆ๋กœ ๋ณ€ํ™˜๋œ `.safetensors` ํŒŒ์ผ์„ ์—…๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค. ์ด๋ ‡๊ฒŒ ํ•˜๋ฉด ํ”ผํด๋œ ํŒŒ์ผ์— ์•…์„ฑ ์ฝ”๋“œ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ, ์•ˆ์ „ํ•˜์ง€ ์•Š์€ ํŒŒ์ผ๊ณผ ์˜์‹ฌ์Šค๋Ÿฌ์šด ํ”ผํด ๊ฐ€์ ธ์˜ค๊ธฐ๋ฅผ ํƒ์ง€ํ•˜๋Š” [๋ณด์•ˆ ์Šค์บ๋„ˆ](https://huggingface.co/docs/hub/security-pickle#hubs-security-scanner)๊ฐ€ ์žˆ๋Š” ํ—ˆ๋ธŒ๋กœ ์—…๋กœ๋“œ๋ฉ๋‹ˆ๋‹ค. - ๊ฐœ๋ณ„ ์ปดํ“จํ„ฐ๊ฐ€ ์•„๋‹Œ. 
`revision` ๋งค๊ฐœ๋ณ€์ˆ˜์— pull request์— ๋Œ€ํ•œ ์ฐธ์กฐ(์˜ˆ: `refs/pr/22`)๋ฅผ ์ง€์ •ํ•˜์—ฌ ์ƒˆ๋กœ์šด `.safetensors` ๊ฐ€์ค‘์น˜๊ฐ€ ์ ์šฉ๋œ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค(ํ—ˆ๋ธŒ์˜ [Check PR](https://huggingface.co/spaces/diffusers/check_pr) Space์—์„œ ํ…Œ์ŠคํŠธํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค):

```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
```

## ์„ธ์ดํ”„ํ…์„œ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ์ด์œ ๋Š” ๋ฌด์—‡์ธ๊ฐ€์š”?

์„ธ์ดํ”„ํ…์„œ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๋ฐ์—๋Š” ์—ฌ๋Ÿฌ ๊ฐ€์ง€ ์ด์œ ๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค:

- ์„ธ์ดํ”„ํ…์„œ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฐ€์žฅ ํฐ ์ด์œ ๋Š” ์•ˆ์ „์„ฑ์ž…๋‹ˆ๋‹ค. ์˜คํ”ˆ ์†Œ์Šค ๋ฐ ๋ชจ๋ธ ๋ฐฐํฌ๊ฐ€ ์ฆ๊ฐ€ํ•จ์— ๋”ฐ๋ผ, ๋‹ค์šด๋กœ๋“œํ•œ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜์— ์•…์„ฑ ์ฝ”๋“œ๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š๋‹ค๋Š” ๊ฒƒ์„ ์‹ ๋ขฐํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒƒ์ด ์ค‘์š”ํ•ด์กŒ์Šต๋‹ˆ๋‹ค. ์„ธ์ดํ”„ํ…์„œ์˜ ํ˜„์žฌ ํ—ค๋” ํฌ๊ธฐ ์ œํ•œ์€ ์ง€๋‚˜์น˜๊ฒŒ ํฐ JSON ํŒŒ์ผ์˜ ๊ตฌ๋ฌธ ๋ถ„์„์„ ๋ฐฉ์ง€ํ•ฉ๋‹ˆ๋‹ค.
- ๋ชจ๋ธ์„ ์ „ํ™˜ํ•  ๋•Œ์˜ ๋กœ๋”ฉ ์†๋„๋Š” ํ…์„œ์˜ ์ œ๋กœ ์นดํ”ผ(zero-copy)๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ์„ธ์ดํ”„ํ…์„œ๋ฅผ ์‚ฌ์šฉํ•ด์•ผ ํ•˜๋Š” ๋˜ ๋‹ค๋ฅธ ์ด์œ ์ž…๋‹ˆ๋‹ค. ๊ฐ€์ค‘์น˜๋ฅผ CPU(๊ธฐ๋ณธ๊ฐ’)๋กœ ๋กœ๋“œํ•˜๋Š” ๊ฒฝ์šฐ pickle์— ๋น„ํ•ด ํŠนํžˆ ๋น ๋ฅด๋ฉฐ, ๊ฐ€์ค‘์น˜๋ฅผ GPU๋กœ ์ง์ ‘ ๋กœ๋“œํ•˜๋Š” ๊ฒฝ์šฐ์—๋„ ๋” ๋น ๋ฅด์ง€๋Š” ์•Š๋”๋ผ๋„ ๋น„์Šทํ•˜๊ฒŒ ๋น ๋ฆ…๋‹ˆ๋‹ค. ์„ฑ๋Šฅ ์ฐจ์ด๋Š” ๋ชจ๋ธ์ด ์ด๋ฏธ ๋กœ๋“œ๋œ ๊ฒฝ์šฐ์—๋งŒ ์ฒด๊ฐํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ, ๊ฐ€์ค‘์น˜๋ฅผ ๋‹ค์šด๋กœ๋“œํ•˜๊ฑฐ๋‚˜ ๋ชจ๋ธ์„ ์ฒ˜์Œ ๋กœ๋“œํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ์ฒด๊ฐํ•˜๊ธฐ ์–ด๋ ต์Šต๋‹ˆ๋‹ค.

์ „์ฒด ํŒŒ์ดํ”„๋ผ์ธ์„ ๋กœ๋“œํ•˜๋Š” ๋ฐ ๊ฑธ๋ฆฌ๋Š” ์‹œ๊ฐ„์ž…๋‹ˆ๋‹ค:

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
"Loaded in safetensors 0:00:02.033658"
"Loaded in PyTorch 0:00:02.663379"
```

ํ•˜์ง€๋งŒ ์‹ค์ œ๋กœ 500MB์˜ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๋ฅผ ๋กœ๋“œํ•˜๋Š” ๋ฐ ๊ฑธ๋ฆฌ๋Š” ์‹œ๊ฐ„์€ ์–ผ๋งˆ ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค:

```bash
safetensors: 3.4873ms
PyTorch: 172.7537ms
```

์ง€์—ฐ ๋กœ๋”ฉ(lazy loading)๋„ ์„ธ์ดํ”„ํ…์„œ์—์„œ ์ง€์›๋˜๋ฉฐ, ์ด๋Š” ๋ถ„์‚ฐ ์„ค์ •์—์„œ ์ผ๋ถ€ ํ…์„œ๋งŒ ๋กœ๋“œํ•˜๋Š” ๋ฐ ์œ ์šฉํ•ฉ๋‹ˆ๋‹ค. ์ด ํ˜•์‹์„ ์‚ฌ์šฉํ•˜๋ฉด ์ผ๋ฐ˜ PyTorch ๊ฐ€์ค‘์น˜๋กœ๋Š” 10๋ถ„์ด ๊ฑธ๋ฆฌ๋˜ [BLOOM](https://huggingface.co/bigscience/bloom) ๋ชจ๋ธ์„ 8๊ฐœ์˜ GPU์—์„œ 45์ดˆ ๋งŒ์— ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
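๋ฐ˜๋Œ€๋กœ ๋กœ์ปฌ์—์„œ ํŒŒ์ดํ”„๋ผ์ธ์„ ์ €์žฅํ•  ๋•Œ ์„ธ์ดํ”„ํ…์„œ ํ˜•์‹์„ ๊ฐ•์ œํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด, [`~DiffusionPipeline.save_pretrained`]์˜ `safe_serialization` ์ธ์ž๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๋ณด์—ฌ์ฃผ๋Š” ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ด๋ฉฐ, ์ €์žฅ ๊ฒฝ๋กœ๋Š” ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค:

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# safe_serialization=True๋กœ ์ง€์ •ํ•˜๋ฉด ๊ฐ€์ค‘์น˜๊ฐ€ .safetensors ํ˜•์‹์œผ๋กœ ์ €์žฅ๋ฉ๋‹ˆ๋‹ค
# (์ €์žฅ ๊ฒฝ๋กœ๋Š” ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค)
pipeline.save_pretrained("./my-safetensors-pipeline", safe_serialization=True)
```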
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/using-diffusers/loading.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ํŒŒ์ดํ”„๋ผ์ธ, ๋ชจ๋ธ, ์Šค์ผ€์ค„๋Ÿฌ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ๊ธฐ๋ณธ์ ์œผ๋กœ diffusion ๋ชจ๋ธ์€ ๋‹ค์–‘ํ•œ ์ปดํฌ๋„ŒํŠธ๋“ค(๋ชจ๋ธ, ํ† ํฌ๋‚˜์ด์ €, ์Šค์ผ€์ค„๋Ÿฌ) ๊ฐ„์˜ ๋ณต์žกํ•œ ์ƒํ˜ธ์ž‘์šฉ์„ ๊ธฐ๋ฐ˜์œผ๋กœ ๋™์ž‘ํ•ฉ๋‹ˆ๋‹ค. ๋””ํ“จ์ €์Šค(Diffusers)๋Š” ์ด๋Ÿฌํ•œ diffusion ๋ชจ๋ธ์„ ๋ณด๋‹ค ์‰ฝ๊ณ  ๊ฐ„ํŽธํ•œ API๋กœ ์ œ๊ณตํ•˜๋Š” ๊ฒƒ์„ ๋ชฉํ‘œ๋กœ ์„ค๊ณ„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. [`DiffusionPipeline`]์€ diffusion ๋ชจ๋ธ์ด ๊ฐ–๋Š” ๋ณต์žก์„ฑ์„ ํ•˜๋‚˜์˜ ํŒŒ์ดํ”„๋ผ์ธ API๋กœ ํ†ตํ•ฉํ•˜๊ณ , ๋™์‹œ์— ์ด๋ฅผ ๊ตฌ์„ฑํ•˜๋Š” ๊ฐ๊ฐ์˜ ์ปดํฌ๋„ŒํŠธ๋“ค์„ ํƒœ์Šคํฌ์— ๋งž์ถฐ ์œ ์—ฐํ•˜๊ฒŒ ์ปค์Šคํ„ฐ๋งˆ์ด์ง•ํ•  ์ˆ˜ ์žˆ๋„๋ก ์ง€์›ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. diffusion ๋ชจ๋ธ์˜ ํ›ˆ๋ จ๊ณผ ์ถ”๋ก ์— ํ•„์š”ํ•œ ๋ชจ๋“  ๊ฒƒ์€ [`DiffusionPipeline.from_pretrained`] ๋ฉ”์„œ๋“œ๋ฅผ ํ†ตํ•ด ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. (์ด ๋ง์˜ ์˜๋ฏธ๋Š” ๋‹ค์Œ ๋‹จ๋ฝ์—์„œ ๋ณด๋‹ค ์ž์„ธํ•˜๊ฒŒ ๋‹ค๋ค„๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค.) ์ด ๋ฌธ์„œ์—์„œ๋Š” ์„ค๋ช…ํ•  ๋‚ด์šฉ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค. * ํ—ˆ๋ธŒ๋ฅผ ํ†ตํ•ด ํ˜น์€ ๋กœ์ปฌ๋กœ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฒ• * ํŒŒ์ดํ”„๋ผ์ธ์— ๋‹ค๋ฅธ ์ปดํฌ๋„ŒํŠธ๋“ค์„ ์ ์šฉํ•˜๋Š” ๋ฒ• * ์˜ค๋ฆฌ์ง€๋„ ์ฒดํฌํฌ์ธํŠธ๊ฐ€ ์•„๋‹Œ variant๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฒ• (variant๋ž€ ๊ธฐ๋ณธ์œผ๋กœ ์„ค์ •๋œ `fp32`๊ฐ€ ์•„๋‹Œ ๋‹ค๋ฅธ ๋ถ€๋™ ์†Œ์ˆ˜์  ํƒ€์ž…(์˜ˆ: `fp16`)์„ ์‚ฌ์šฉํ•˜๊ฑฐ๋‚˜ Non-EMA ๊ฐ€์ค‘์น˜๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ์ฒดํฌํฌ์ธํŠธ๋“ค์„ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค.) * ๋ชจ๋ธ๊ณผ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๋ฒ• ## Diffusion ํŒŒ์ดํ”„๋ผ์ธ <Tip> ๐Ÿ’ก [`DiffusionPipeline`] ํด๋ž˜์Šค๊ฐ€ ๋™์ž‘ํ•˜๋Š” ๋ฐฉ์‹์— ๋ณด๋‹ค ์ž์„ธํ•œ ๋‚ด์šฉ์ด ๊ถ๊ธˆํ•˜๋‹ค๋ฉด, [DiffusionPipeline explained](#diffusionpipeline์—-๋Œ€ํ•ด-์•Œ์•„๋ณด๊ธฐ) ์„น์…˜์„ ํ™•์ธํ•ด๋ณด์„ธ์š”. </Tip> [`DiffusionPipeline`] ํด๋ž˜์Šค๋Š” diffusion ๋ชจ๋ธ์„ [ํ—ˆ๋ธŒ](https://huggingface.co/models?library=diffusers)๋กœ๋ถ€ํ„ฐ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฐ€์žฅ ์‹ฌํ”Œํ•˜๋ฉด์„œ ๋ณดํŽธ์ ์ธ ๋ฐฉ์‹์ž…๋‹ˆ๋‹ค. [`DiffusionPipeline.from_pretrained`] ๋ฉ”์„œ๋“œ๋Š” ์ ํ•ฉํ•œ ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๋ฅผ ์ž๋™์œผ๋กœ ํƒ์ง€ํ•˜๊ณ , ํ•„์š”ํ•œ ๊ตฌ์„ฑ์š”์†Œ(configuration)์™€ ๊ฐ€์ค‘์น˜(weight) ํŒŒ์ผ๋“ค์„ ๋‹ค์šด๋กœ๋“œํ•˜๊ณ  ์บ์‹ฑํ•œ ๋‹ค์Œ, ํ•ด๋‹น ํŒŒ์ดํ”„๋ผ์ธ ์ธ์Šคํ„ด์Šค๋ฅผ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = DiffusionPipeline.from_pretrained(repo_id) ``` ๋ฌผ๋ก  [`DiffusionPipeline`] ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•˜์ง€ ์•Š๊ณ , ๋ช…์‹œ์ ์œผ๋กœ ์ง์ ‘ ํ•ด๋‹น ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋Š” ๊ฒƒ๋„ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ์•„๋ž˜ ์˜ˆ์‹œ ์ฝ”๋“œ๋Š” ์œ„ ์˜ˆ์‹œ์™€ ๋™์ผํ•œ ์ธ์Šคํ„ด์Šค๋ฅผ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ```python from diffusers import StableDiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(repo_id) ``` [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)์ด๋‚˜ [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) ๊ฐ™์€ ์ฒดํฌํฌ์ธํŠธ๋“ค์˜ ๊ฒฝ์šฐ, ํ•˜๋‚˜ ์ด์ƒ์˜ ๋‹ค์–‘ํ•œ ํƒœ์Šคํฌ์— ํ™œ์šฉ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
Checkpoints such as [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) can be used for more than one task (for example, both of these checkpoints can be used for text-to-image and image-to-image). If you want to use one of these checkpoints for a task other than its default one, you have to use the corresponding task-specific pipeline:

```python
from diffusers import StableDiffusionImg2ImgPipeline

repo_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id)
```

### Local pipeline

To load a pipeline locally, use `git-lfs` to manually download the checkpoint to your local disk. The commands below create a folder named `./stable-diffusion-v1-5` on your disk:

```bash
git lfs install
git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```

Then pass the local path to the [`~DiffusionPipeline.from_pretrained`] method:

```python
from diffusers import DiffusionPipeline

repo_id = "./stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
```

As in the example above, when `repo_id` is a local path, the [`~DiffusionPipeline.from_pretrained`] method detects this automatically and doesn't download any files from the Hub. This also means that if the pipeline checkpoint stored on your local disk is outdated, the locally stored checkpoint is used as-is instead of downloading the latest version.

### Swapping components in a pipeline

The components inside a pipeline can be swapped for other compatible components. This kind of component swapping matters because:

- Which scheduler you use is an important factor in the trade-off between generation speed and generation quality.
- The components of a diffusion model are usually trained independently of each other, so a component can be swapped out for one that performs better.
- During fine-tuning, typically only some components - such as the UNet or the text encoder - are trained.

You can check which schedulers are compatible through the `compatibles` attribute:

```python
from diffusers import DiffusionPipeline

repo_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
stable_diffusion.scheduler.compatibles
```
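For a quick in-place swap to any of the compatible schedulers, you can also build a new scheduler directly from the current scheduler's configuration with `from_config`. A small sketch of this alternative pattern:

```python
from diffusers import EulerDiscreteScheduler

# reuse the existing scheduler's config to instantiate a compatible replacement
stable_diffusion.scheduler = EulerDiscreteScheduler.from_config(stable_diffusion.scheduler.config)
```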
Alternatively, use the [`SchedulerMixin.from_pretrained`] method to replace the default [`PNDMScheduler`] with a more performant scheduler such as the [`EulerDiscreteScheduler`]. When loading a scheduler this way, pass the `subfolder` argument to point at the [scheduler subfolder](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler) of the pipeline repository. Then pass the newly created [`EulerDiscreteScheduler`] instance to the `scheduler` argument of the [`DiffusionPipeline`]:

```python
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

repo_id = "runwayml/stable-diffusion-v1-5"
scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler)
```

### Safety checker

Diffusion models like Stable Diffusion can generate harmful images. To guard against this, 🧨 Diffusers provides a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) that screens generated images for harmful content. If you don't want to use the safety checker, pass `None` to the `safety_checker` argument:

```python
from diffusers import DiffusionPipeline

repo_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None)
```

### Reusing components

If the same model is used repeatedly across multiple pipelines, there is no need to load duplicate copies of its weights into RAM. The [`~DiffusionPipeline.components`] attribute gives you access to the components inside a pipeline; this section shows how to use it to avoid loading the same model weights into RAM twice:

```python
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

model_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)

components = stable_diffusion_txt2img.components
```

By passing the `components` variable declared above to another pipeline, you can reuse the same components without loading the model weights into RAM again:

```python
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components)
```

You can also pass the components to a pipeline individually. For example, you can reuse every component of the `stable_diffusion_txt2img` pipeline in the `stable_diffusion_img2img` pipeline except for the safety checker (`safety_checker`) and the feature extractor (`feature_extractor`):

```python
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

model_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(
    vae=stable_diffusion_txt2img.vae,
    text_encoder=stable_diffusion_txt2img.text_encoder,
    tokenizer=stable_diffusion_txt2img.tokenizer,
    unet=stable_diffusion_txt2img.unet,
    scheduler=stable_diffusion_txt2img.scheduler,
    safety_checker=None,
    feature_extractor=None,
    requires_safety_checker=False,
)
```
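To convince yourself that no weights were duplicated, you can check that both pipelines hold the very same module objects. A minimal sketch, continuing from the snippet above:

```python
# both pipelines reference the same underlying modules, so memory is shared
assert stable_diffusion_img2img.unet is stable_diffusion_txt2img.unet
assert stable_diffusion_img2img.vae is stable_diffusion_txt2img.vae
```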
## Checkpoint variants

A variant is usually a checkpoint that:

- uses weights stored in a lower-precision, lower-storage floating point type such as `torch.float16` *(note that a variant like this can't be used for additional training or on a CPU)*.
- uses non-EMA weights *(non-EMA weights are recommended when continuing to fine-tune a checkpoint, and not recommended for inference)*.

<Tip>

💡 When checkpoints have an identical model structure but were trained on different datasets or with different training setups, they should be stored in separate repositories rather than as variants (for example, [`stable-diffusion-v1-4`] and [`stable-diffusion-v1-5`]).

</Tip>

| **checkpoint type** | **weight name**                     | **argument for loading weights** |
| ------------------- | ----------------------------------- | -------------------------------- |
| original            | diffusion_pytorch_model.bin         |                                  |
| floating point      | diffusion_pytorch_model.fp16.bin    | `variant`, `torch_dtype`         |
| non-EMA             | diffusion_pytorch_model.non_ema.bin | `variant`                        |

There are two important arguments for loading variants:

* `torch_dtype` defines the floating point type of the loaded checkpoint. For example, specifying `torch_dtype=torch.float16` converts the weights to `fp16` (if nothing is specified, the weights are loaded as `fp32` by default). You can also load a checkpoint without specifying the `variant` argument and convert it to `fp16` with `torch_dtype=torch.float16`; in that case, the default `fp32` weights are downloaded first and converted to `fp16` after loading.

* `variant` defines which variant should be loaded from the repository. For instance, to load the `non_ema` checkpoint from the [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) repository, pass `variant="non_ema"`:

```python
import torch
from diffusers import DiffusionPipeline

# load fp16 variant
stable_diffusion = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
)
# load non_ema variant
stable_diffusion = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
```

To save a checkpoint that uses a different floating point type or non-EMA weights, use the [`DiffusionPipeline.save_pretrained`] method and specify the `variant` argument. Save the variant to the same folder as the original checkpoint, so you can load both the original checkpoint and the variant from the same folder:

```python
from diffusers import DiffusionPipeline

# save as fp16 variant
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16")
# save as non-ema variant
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
```

If you don't save the variant to an existing folder, you must specify the `variant` argument when loading it; otherwise an error is raised because the original checkpoint can't be found:
```python # ๐Ÿ‘Ž this won't work stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16) # ๐Ÿ‘ this works stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) ``` ### ๋ชจ๋ธ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ๋ชจ๋ธ๋“ค์€ [`ModelMixin.from_pretrained`] ๋ฉ”์„œ๋“œ๋ฅผ ํ†ตํ•ด ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ๋ฉ”์„œ๋“œ๋Š” ์ตœ์‹  ๋ฒ„์ „์˜ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜ ํŒŒ์ผ๊ณผ ์„ค์ • ํŒŒ์ผ(configurations)์„ ๋‹ค์šด๋กœ๋“œํ•˜๊ณ  ์บ์‹ฑํ•ฉ๋‹ˆ๋‹ค. ๋งŒ์•ฝ ์ด๋Ÿฌํ•œ ํŒŒ์ผ๋“ค์ด ์ตœ์‹  ๋ฒ„์ „์œผ๋กœ ๋กœ์ปฌ ์บ์‹œ์— ์ €์žฅ๋˜์–ด ์žˆ๋‹ค๋ฉด, [`ModelMixin.from_pretrained`]๋Š” ๊ตณ์ด ํ•ด๋‹น ํŒŒ์ผ๋“ค์„ ๋‹ค์‹œ ๋‹ค์šด๋กœ๋“œํ•˜์ง€ ์•Š์œผ๋ฉฐ, ๊ทธ์ € ์บ์‹œ์— ์žˆ๋Š” ์ตœ์‹  ํŒŒ์ผ๋“ค์„ ์žฌ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ๋ชจ๋ธ์€ `subfolder` ์ธ์ž์— ๋ช…์‹œ๋œ ํ•˜์œ„ ํด๋”๋กœ๋ถ€ํ„ฐ ๋กœ๋“œ๋ฉ๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด `runwayml/stable-diffusion-v1-5`์˜ UNet ๋ชจ๋ธ์˜ ๊ฐ€์ค‘์น˜๋Š” [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) ํด๋”์— ์ €์žฅ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. ```python from diffusers import UNet2DConditionModel repo_id = "runwayml/stable-diffusion-v1-5" model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet") ``` ํ˜น์€ [ํ•ด๋‹น ๋ชจ๋ธ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ](https://huggingface.co/google/ddpm-cifar10-32/tree/main)๋กœ๋ถ€ํ„ฐ ๋‹ค์ด๋ ‰ํŠธ๋กœ ๊ฐ€์ ธ์˜ค๋Š” ๊ฒƒ ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ```python from diffusers import UNet2DModel repo_id = "google/ddpm-cifar10-32" model = UNet2DModel.from_pretrained(repo_id) ``` ๋˜ํ•œ ์•ž์„œ ๋ดค๋˜ `variant` ์ธ์ž๋ฅผ ๋ช…์‹œํ•จ์œผ๋กœ์จ, Non-EMA๋‚˜ `fp16`์˜ ๊ฐ€์ค‘์น˜๋ฅผ ๊ฐ€์ ธ์˜ค๋Š” ๊ฒƒ ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ```python from diffusers import UNet2DConditionModel model = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non-ema") model.save_pretrained("./local-unet", variant="non-ema") ``` ### ์Šค์ผ€์ค„๋Ÿฌ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์€ [`SchedulerMixin.from_pretrained`] ๋ฉ”์„œ๋“œ๋ฅผ ํ†ตํ•ด ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ชจ๋ธ๊ณผ ๋‹ฌ๋ฆฌ ์Šค์ผ€์ค„๋Ÿฌ๋Š” ๋ณ„๋„์˜ ๊ฐ€์ค‘์น˜๋ฅผ ๊ฐ–์ง€ ์•Š์œผ๋ฉฐ, ๋”ฐ๋ผ์„œ ๋‹น์—ฐํžˆ ๋ณ„๋„์˜ ํ•™์Šต๊ณผ์ •์„ ์š”๊ตฌํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์€ (ํ•ด๋‹น ์Šค์ผ€์ค„๋Ÿฌ ํ•˜์œ„ํด๋”์˜) configration ํŒŒ์ผ์„ ํ†ตํ•ด ์ •์˜๋ฉ๋‹ˆ๋‹ค. ์—ฌ๋Ÿฌ๊ฐœ์˜ ์Šค์ผ€์ค„๋Ÿฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜จ๋‹ค๊ณ  ํ•ด์„œ ๋งŽ์€ ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์†Œ๋ชจํ•˜๋Š” ๊ฒƒ์€ ์•„๋‹ˆ๋ฉฐ, ๋‹ค์–‘ํ•œ ์Šค์ผ€์ค„๋Ÿฌ๋“ค์— ๋™์ผํ•œ ์Šค์ผ€์ค„๋Ÿฌ configration์„ ์ ์šฉํ•˜๋Š” ๊ฒƒ ์—ญ์‹œ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. ๋‹ค์Œ ์˜ˆ์‹œ ์ฝ”๋“œ์—์„œ ๋ถˆ๋Ÿฌ์˜ค๋Š” ์Šค์ผ€์ค„๋Ÿฌ๋“ค์€ ๋ชจ๋‘ [`StableDiffusionPipeline`]๊ณผ ํ˜ธํ™˜๋˜๋Š”๋ฐ, ์ด๋Š” ๊ณง ํ•ด๋‹น ์Šค์ผ€์ค„๋Ÿฌ๋“ค์— ๋™์ผํ•œ ์Šค์ผ€์ค„๋Ÿฌ configration ํŒŒ์ผ์„ ์ ์šฉํ•  ์ˆ˜ ์žˆ์Œ์„ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค. 
## Schedulers

Schedulers are loaded with the [`SchedulerMixin.from_pretrained`] method. Unlike models, schedulers don't have separate weights, so naturally they don't require any training. Schedulers are defined by a configuration file (in the scheduler subfolder). Loading several schedulers doesn't consume a significant amount of memory, and the same scheduler configuration can be applied to a variety of schedulers. All of the schedulers loaded in the example below are compatible with the [`StableDiffusionPipeline`], which means the same scheduler configuration file can be applied to each of them:

```python
from diffusers import StableDiffusionPipeline
from diffusers import (
    DDPMScheduler,
    DDIMScheduler,
    PNDMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
)

repo_id = "runwayml/stable-diffusion-v1-5"

ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler")
ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler")
pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler")
lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler")

# replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler`
pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm)
```

## DiffusionPipeline explained

As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things:

- First, the `from_pretrained` method downloads the latest version of the pipeline and caches it. If the latest version of the pipeline is already in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cached files instead of downloading them again.
- Second, it loads the checkpoint with the appropriate pipeline class, as recorded in the `model_index.json` file.

The folder structure of a pipeline corresponds directly to the structure of its pipeline class. For example, the [`StableDiffusionPipeline`] class has a structure corresponding to the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) repository:

```python
from diffusers import DiffusionPipeline

repo_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(repo_id)
print(pipeline)
```

Looking at the output of the code above, you can see that `pipeline` is an instance of [`StableDiffusionPipeline`], which consists of the following seven components:
- `"feature_extractor"`: [`~transformers.CLIPFeatureExtractor`]์˜ ์ธ์Šคํ„ด์Šค - `"safety_checker"`: ์œ ํ•ดํ•œ ์ปจํ…์ธ ๋ฅผ ์Šคํฌ๋ฆฌ๋‹ํ•˜๊ธฐ ์œ„ํ•œ [์ปดํฌ๋„ŒํŠธ](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) - `"scheduler"`: [`PNDMScheduler`]์˜ ์ธ์Šคํ„ด์Šค - `"text_encoder"`: [`~transformers.CLIPTextModel`]์˜ ์ธ์Šคํ„ด์Šค - `"tokenizer"`: a [`~transformers.CLIPTokenizer`]์˜ ์ธ์Šคํ„ด์Šค - `"unet"`: [`UNet2DConditionModel`]์˜ ์ธ์Šคํ„ด์Šค - `"vae"` [`AutoencoderKL`]์˜ ์ธ์Šคํ„ด์Šค ```json StableDiffusionPipeline { "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` ํŒŒ์ดํ”„๋ผ์ธ ์ธ์Šคํ„ด์Šค์˜ ์ปดํฌ๋„ŒํŠธ๋“ค์„ [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)์˜ ํด๋” ๊ตฌ์กฐ์™€ ๋น„๊ตํ•ด๋ณผ ๊ฒฝ์šฐ, ๊ฐ๊ฐ์˜ ์ปดํฌ๋„ŒํŠธ๋งˆ๋‹ค ๋ณ„๋„์˜ ํด๋”๊ฐ€ ์žˆ์Œ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ``` . โ”œโ”€โ”€ feature_extractor โ”‚ โ””โ”€โ”€ preprocessor_config.json โ”œโ”€โ”€ model_index.json โ”œโ”€โ”€ safety_checker โ”‚ โ”œโ”€โ”€ config.json โ”‚ โ””โ”€โ”€ pytorch_model.bin โ”œโ”€โ”€ scheduler โ”‚ โ””โ”€โ”€ scheduler_config.json โ”œโ”€โ”€ text_encoder โ”‚ โ”œโ”€โ”€ config.json โ”‚ โ””โ”€โ”€ pytorch_model.bin โ”œโ”€โ”€ tokenizer โ”‚ โ”œโ”€โ”€ merges.txt โ”‚ โ”œโ”€โ”€ special_tokens_map.json โ”‚ โ”œโ”€โ”€ tokenizer_config.json โ”‚ โ””โ”€โ”€ vocab.json โ”œโ”€โ”€ unet โ”‚ โ”œโ”€โ”€ config.json โ”‚ โ”œโ”€โ”€ diffusion_pytorch_model.bin โ””โ”€โ”€ vae โ”œโ”€โ”€ config.json โ”œโ”€โ”€ diffusion_pytorch_model.bin ``` ๋˜ํ•œ ๊ฐ๊ฐ์˜ ์ปดํฌ๋„ŒํŠธ๋“ค์„ ํŒŒ์ดํ”„๋ผ์ธ ์ธ์Šคํ„ด์Šค์˜ ์†์„ฑ์œผ๋กœ์จ ์ฐธ์กฐํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```py pipeline.tokenizer ``` ```python CLIPTokenizer( name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", vocab_size=49408, model_max_length=77, is_fast=False, padding_side="right", truncation_side="right", special_tokens={ "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "pad_token": "<|endoftext|>", }, ) ``` ๋ชจ๋“  ํŒŒ์ดํ”„๋ผ์ธ์€ `model_index.json` ํŒŒ์ผ์„ ํ†ตํ•ด [`DiffusionPipeline`]์— ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์ •๋ณด๋ฅผ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. - `_class_name` ๋Š” ์–ด๋–ค ํŒŒ์ดํ”„๋ผ์ธ ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•ด์•ผ ํ•˜๋Š”์ง€์— ๋Œ€ํ•ด ์•Œ๋ ค์ค๋‹ˆ๋‹ค. - `_diffusers_version`๋Š” ์–ด๋–ค ๋ฒ„์ „์˜ ๋””ํ“จ์ €์Šค๋กœ ํŒŒ์ดํ”„๋ผ์ธ ์•ˆ์˜ ๋ชจ๋ธ๋“ค์ด ๋งŒ๋“ค์–ด์กŒ๋Š”์ง€๋ฅผ ์•Œ๋ ค์ค๋‹ˆ๋‹ค. - ๊ทธ ๋‹ค์Œ์€ ๊ฐ๊ฐ์˜ ์ปดํฌ๋„ŒํŠธ๋“ค์ด ์–ด๋–ค ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ ์–ด๋–ค ํด๋ž˜์Šค๋กœ ๋งŒ๋“ค์–ด์กŒ๋Š”์ง€์— ๋Œ€ํ•ด ์•Œ๋ ค์ค๋‹ˆ๋‹ค. (์•„๋ž˜ ์˜ˆ์‹œ์—์„œ `"feature_extractor" : ["transformers", "CLIPImageProcessor"]`์˜ ๊ฒฝ์šฐ, `feature_extractor` ์ปดํฌ๋„ŒํŠธ๋Š” `transformers` ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ `CLIPImageProcessor` ํด๋ž˜์Šค๋ฅผ ํ†ตํ•ด ๋งŒ๋“ค์–ด์กŒ๋‹ค๋Š” ๊ฒƒ์„ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค.) 
```json
{
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.6.0",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
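If you want to read this mapping programmatically, the pipeline's config exposes the same information as the JSON file. A small sketch (keys as in the `model_index.json` shown above; the exact container types may vary across 🧨 Diffusers versions):

```python
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# the pipeline config mirrors model_index.json
print(pipeline.config["_class_name"])  # "StableDiffusionPipeline"
print(pipeline.config["scheduler"])    # ("diffusers", "PNDMScheduler")
```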
hf_public_repos/diffusers/docs/source/ko/using-diffusers/pipeline_overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Overview

A pipeline is an end-to-end class that brings independently trained models and schedulers together, providing a quick and easy way to use a diffusion system for inference. A specific combination of a model and a scheduler - along with any special capabilities - defines a specific pipeline type, such as the [`StableDiffusionPipeline`] or the [`StableDiffusionControlNetPipeline`]. All pipeline types inherit from the base [`DiffusionPipeline`] class; pass it any checkpoint, and it automatically detects the pipeline type and loads the required components.

This section introduces the tasks supported by pipelines, such as unconditional image generation and the various techniques and variations of text-to-image generation. You'll also learn how to gain more control over the generation process by setting a seed for reproducibility and by weighting prompts to adjust how much influence certain words in the prompt have over the output. Finally, you'll see how to write a community pipeline for a custom task such as generating images from speech.
hf_public_repos/diffusers/docs/source/ko/using-diffusers/reusing_seeds.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Improving image quality with deterministic generation

A common way to improve the quality of generated images is with *deterministic batch generation*: generate a batch of images, select one image to improve, and refine it with a more detailed prompt in a second round of inference. The key is to pass the pipeline a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator)'s for batched image generation, and tie each `Generator` to a seed so you can reuse it for a specific image.

For example, let's use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) to generate several versions of the following prompt:

```py
prompt = "Labrador in the style of Vermeer"
```

Instantiate the pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available):

```python
>>> import torch
>>> from diffusers import DiffusionPipeline

>>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
```

Now define four different `Generator`s and assign each `Generator` a seed (`0` to `3`), so you can later reuse a `Generator` for a specific image:

```python
>>> generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)]
```

Generate the images and have a look:

```python
>>> images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
>>> images
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds.jpg)

In this example we'll improve upon the first image, but in practice you can use any image you want (even the one with two sets of eyes!). Since the first image used the `Generator` with seed `0`, we'll reuse that `Generator` for the second round of inference. To improve the quality of the image, add some additional text to the prompt:

```python
prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]]
generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)]
```

Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round!

```python
>>> images = pipe(prompt, generator=generator).images
>>> images
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds_2.jpg)
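If you later want to reproduce the chosen image on its own, re-seeding a single `Generator` with the same value gives the same result. A minimal sketch, assuming the other call arguments (prompt and number of inference steps) stay the same:

```python
# re-create the exact first image from the original batch with its seed
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("Labrador in the style of Vermeer", generator=generator).images[0]
```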
hf_public_repos/diffusers/docs/source/ko/training/adapt_a_model.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Adapting a model to a new task

Many diffusion systems share the same components, which makes it possible to adapt a model pretrained on one task to an entirely different task.

This guide shows how to adapt a pretrained text-to-image model to inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`].

## Configuring UNet2DConditionModel parameters

By default, a [`UNet2DConditionModel`] accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels). For example, load a pretrained text-to-image model such as [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and check the number of `in_channels`:

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.unet.config["in_channels"]
4
```

Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model such as [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting):

```py
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
pipeline.unet.config["in_channels"]
9
```

To adapt the text-to-image model to inpainting, you need to change the number of `in_channels` from 4 to 9. Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image weights and change `in_channels` to 9. Since changing `in_channels` changes the shape of the input layer's weights, you need to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size-mismatch error:

```py
from diffusers import UNet2DConditionModel

model_id = "runwayml/stable-diffusion-v1-5"
unet = UNet2DConditionModel.from_pretrained(
    model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True
)
```

The pretrained weights of the other components of the text-to-image model are initialized from the checkpoint, but the input channel weights of the `unet` (`conv_in.weight`) are randomly initialized. This is why it's important to fine-tune the model on inpainting - otherwise, the model returns noise.
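As a quick sanity check - a minimal sketch, continuing from the `unet` loaded above - you can confirm that the input convolution is the only layer whose shape changed and that it now expects 9 channels (the 320 output channels are specific to the Stable Diffusion v1-5 UNet):

```python
# the input convolution now expects 9 input channels (320 output channels, 3x3 kernel)
print(unet.config["in_channels"])  # 9
print(unet.conv_in.weight.shape)   # torch.Size([320, 9, 3, 3])
```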
hf_public_repos/diffusers/docs/source/ko/training/create_dataset.md
# ํ•™์Šต์„ ์œ„ํ•œ ๋ฐ์ดํ„ฐ์…‹ ๋งŒ๋“ค๊ธฐ [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) ์—๋Š” ๋ชจ๋ธ ๊ต์œก์„ ์œ„ํ•œ ๋งŽ์€ ๋ฐ์ดํ„ฐ์…‹์ด ์žˆ์ง€๋งŒ, ๊ด€์‹ฌ์ด ์žˆ๊ฑฐ๋‚˜ ์‚ฌ์šฉํ•˜๊ณ  ์‹ถ์€ ๋ฐ์ดํ„ฐ์…‹์„ ์ฐพ์„ ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ ๐Ÿค— [Datasets](hf.co/docs/datasets) ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ฐ์ดํ„ฐ์…‹์„ ๋งŒ๋“ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ฐ์ดํ„ฐ์…‹ ๊ตฌ์กฐ๋Š” ๋ชจ๋ธ์„ ํ•™์Šตํ•˜๋ ค๋Š” ์ž‘์—…์— ๋”ฐ๋ผ ๋‹ฌ๋ผ์ง‘๋‹ˆ๋‹ค. ๊ฐ€์žฅ ๊ธฐ๋ณธ์ ์ธ ๋ฐ์ดํ„ฐ์…‹ ๊ตฌ์กฐ๋Š” unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ๊ณผ ๊ฐ™์€ ์ž‘์—…์„ ์œ„ํ•œ ์ด๋ฏธ์ง€ ๋””๋ ‰ํ† ๋ฆฌ์ž…๋‹ˆ๋‹ค. ๋˜ ๋‹ค๋ฅธ ๋ฐ์ดํ„ฐ์…‹ ๊ตฌ์กฐ๋Š” ์ด๋ฏธ์ง€ ๋””๋ ‰ํ† ๋ฆฌ์™€ text-to-image ์ƒ์„ฑ๊ณผ ๊ฐ™์€ ์ž‘์—…์— ํ•ด๋‹นํ•˜๋Š” ํ…์ŠคํŠธ ์บก์…˜์ด ํฌํ•จ๋œ ํ…์ŠคํŠธ ํŒŒ์ผ์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ์—๋Š” ํŒŒ์ธ ํŠœ๋‹ํ•  ๋ฐ์ดํ„ฐ์…‹์„ ๋งŒ๋“œ๋Š” ๋‘ ๊ฐ€์ง€ ๋ฐฉ๋ฒ•์„ ์†Œ๊ฐœํ•ฉ๋‹ˆ๋‹ค: - ์ด๋ฏธ์ง€ ํด๋”๋ฅผ `--train_data_dir` ์ธ์ˆ˜์— ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค. - ๋ฐ์ดํ„ฐ์…‹์„ Hub์— ์—…๋กœ๋“œํ•˜๊ณ  ๋ฐ์ดํ„ฐ์…‹ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ id๋ฅผ `--dataset_name` ์ธ์ˆ˜์— ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก ํ•™์Šต์— ์‚ฌ์šฉํ•  ์ด๋ฏธ์ง€ ๋ฐ์ดํ„ฐ์…‹์„ ๋งŒ๋“œ๋Š” ๋ฐฉ๋ฒ•์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์ด๋ฏธ์ง€ ๋ฐ์ดํ„ฐ์…‹ ๋งŒ๋“ค๊ธฐ](https://huggingface.co/docs/datasets/image_dataset) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š”. </Tip> ## ํด๋” ํ˜•ํƒœ๋กœ ๋ฐ์ดํ„ฐ์…‹ ๊ตฌ์ถ•ํ•˜๊ธฐ Unconditional ์ƒ์„ฑ์„ ์œ„ํ•ด ์ด๋ฏธ์ง€ ํด๋”๋กœ ์ž์‹ ์˜ ๋ฐ์ดํ„ฐ์…‹์„ ๊ตฌ์ถ•ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” ๐Ÿค— Datasets์˜ [ImageFolder](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) ๋นŒ๋”๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ž๋™์œผ๋กœ ํด๋”์—์„œ ๋ฐ์ดํ„ฐ์…‹์„ ๊ตฌ์ถ•ํ•ฉ๋‹ˆ๋‹ค. ๋””๋ ‰ํ† ๋ฆฌ ๊ตฌ์กฐ๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์•„์•ผ ํ•ฉ๋‹ˆ๋‹ค : ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` ๋ฐ์ดํ„ฐ์…‹ ๋””๋ ‰ํ„ฐ๋ฆฌ์˜ ๊ฒฝ๋กœ๋ฅผ `--train_data_dir` ์ธ์ˆ˜๋กœ ์ „๋‹ฌํ•œ ๋‹ค์Œ ํ•™์Šต์„ ์‹œ์ž‘ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash accelerate launch train_unconditional.py \ # argument๋กœ ํด๋” ์ง€์ •ํ•˜๊ธฐ \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` ## Hub์— ๋ฐ์ดํ„ฐ ์˜ฌ๋ฆฌ๊ธฐ <Tip> ๐Ÿ’ก ๋ฐ์ดํ„ฐ์…‹์„ ๋งŒ๋“ค๊ณ  Hub์— ์—…๋กœ๋“œํ•˜๋Š” ๊ฒƒ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [๐Ÿค— Datasets์„ ์‚ฌ์šฉํ•œ ์ด๋ฏธ์ง€ ๊ฒ€์ƒ‰](https://huggingface.co/blog/image-search-datasets) ๊ฒŒ์‹œ๋ฌผ์„ ์ฐธ๊ณ ํ•˜์„ธ์š”. </Tip> PIL ์ธ์ฝ”๋”ฉ๋œ ์ด๋ฏธ์ง€๊ฐ€ ํฌํ•จ๋œ `์ด๋ฏธ์ง€` ์—ด์„ ์ƒ์„ฑํ•˜๋Š” [์ด๋ฏธ์ง€ ํด๋”](https://huggingface.co/docs/datasets/image_load#imagefolder) ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•˜์—ฌ ๋ฐ์ดํ„ฐ์…‹ ์ƒ์„ฑ์„ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค. `data_dir` ๋˜๋Š” `data_files` ๋งค๊ฐœ ๋ณ€์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ฐ์ดํ„ฐ์…‹์˜ ์œ„์น˜๋ฅผ ์ง€์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
## Uploading your data to the Hub

<Tip>

💡 For more details on creating a dataset and uploading it to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post.

</Tip>

Start by creating a dataset with the [ImageFolder](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images. You can use the `data_dir` or `data_files` parameters to specify the location of the dataset. The `data_files` parameter supports mapping specific files to dataset splits such as `train` or `test`:

```python
from datasets import load_dataset

# example 1: local folder
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")

# example 2: local files (supported formats: tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")

# example 3: remote files (supported formats: tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset(
    "imagefolder",
    data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip",
)

# example 4: multiple splits
dataset = load_dataset(
    "imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}
)
```

Then use the [push_to_hub](https://huggingface.co/docs/datasets/v2.13.1/en/package_reference/main_classes#datasets.Dataset.push_to_hub) method to upload the dataset to the Hub:

```python
# assuming you have already run the huggingface-cli login command in a terminal
dataset.push_to_hub("name_of_your_dataset")

# if you want to push to a private repo, add `private=True`:
dataset.push_to_hub("name_of_your_dataset", private=True)
```

Now the dataset can be used for training by passing the dataset name to the `--dataset_name` argument:

```bash
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
    --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
    --dataset_name="name_of_your_dataset" \
    <other-arguments>
```

## Next steps

Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) argument of a training script. As a next step, feel free to try training a model on your dataset for [unconditional generation](https://huggingface.co/docs/diffusers/v0.18.2/en/training/unconditional_training) or [text-to-image generation](https://huggingface.co/docs/diffusers/training/text2image)!
hf_public_repos/diffusers/docs/source/ko/training/controlnet.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# ControlNet

[Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) (ControlNet) was written by Lvmin Zhang and Maneesh Agrawala.

This example is based on [the training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k).

## Installing the dependencies

Before running the scripts below, make sure to install the library's training dependencies.

<Tip warning={true}>

To successfully run the latest versions of the example scripts, we strongly recommend installing from source and keeping the installation up to date. We update the example scripts frequently and install example-specific requirements.

</Tip>

To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then navigate into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet):

```bash
cd examples/controlnet
```

Now run:

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:

```bash
accelerate config
```

Or, if you don't know anything about your environment, you can initialize a default 🤗Accelerate configuration with:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell such as a notebook, you can initialize it with:

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

## Circle-filling dataset

The original dataset is hosted in the ControlNet [repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip), but we re-uploaded it [here](https://huggingface.co/datasets/fusing/fill50k) so that it is compatible with 🤗 Datasets and the data loading can be handled within the training script.

Our training example uses [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5), which was also used to train the original set of ControlNet models. However, a ControlNet can be trained to augment any compatible Stable Diffusion model, such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`stabilityai/stable-diffusion-2-1`](https://huggingface.co/stabilityai/stable-diffusion-2-1).

To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
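If you'd like a quick look at what the training script will consume, you can load the dataset and inspect a row. A minimal sketch (the column names shown are the defaults `train_controlnet.py` expects; the example caption is illustrative):

```python
from datasets import load_dataset

dataset = load_dataset("fusing/fill50k", split="train")

# each row pairs a target image with its conditioning image and a text prompt
print(dataset)             # features: image, conditioning_image, text
print(dataset[0]["text"])  # e.g. "pale golden rod circle with old lace background"
```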
## Training

Download the following images, which are used to condition the validation runs during training:

```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to a directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=4 \
 --push_to_hub
```

This default configuration requires ~38 GB of VRAM.

By default, the training script logs its output to TensorBoard. Pass `--report_to wandb` to use Weights & Biases instead.

Gradient accumulation with a smaller batch size can be used to reduce the training requirements to ~20 GB of VRAM:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --push_to_hub
```

## Training with multiple GPUs

`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`.
An example command:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=4 \
 --mixed_precision="fp16" \
 --tracker_project_name="controlnet-demo" \
 --report_to=wandb \
 --push_to_hub
```

## Example results

#### After 300 steps with batch size 8

| conditioning image | generated image |
|:------------------:|:---------------:|
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) |

#### After 6000 steps with batch size 8

| conditioning image | generated image |
|:------------------:|:---------------:|
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) |
| ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) |

## Training on a 16 GB GPU

Enable the following optimizations to train on a 16 GB GPU:

- gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (follow the linked instructions if it isn't installed yet)
์ด์ œ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹œ์ž‘ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --push_to_hub ``` ## 12GB GPU์—์„œ ํ•™์Šตํ•˜๊ธฐ 12GB GPU์—์„œ ์‹คํ–‰ํ•˜๊ธฐ ์œ„ํ•ด ๋‹ค์Œ์˜ ์ตœ์ ํ™”๋ฅผ ์ง„ํ–‰ํ•˜์„ธ์š”: - ๊ธฐ์šธ๊ธฐ ์ฒดํฌํฌ์ธํŠธ ์ €์žฅํ•˜๊ธฐ - bitsandbyte์˜ 8-bit [optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation)(๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜๋‹ค๋ฉด ๋งํฌ์— ์—ฐ๊ฒฐ๋œ ์„ค๋ช…์„œ๋ฅผ ๋ณด์„ธ์š”) - [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers)(๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜๋‹ค๋ฉด ๋งํฌ์— ์—ฐ๊ฒฐ๋œ ์„ค๋ช…์„œ๋ฅผ ๋ณด์„ธ์š”) - ๊ธฐ์šธ๊ธฐ๋ฅผ `None`์œผ๋กœ ์„ค์ • ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --push_to_hub ``` `pip install xformers`์œผ๋กœ `xformers`์„ ํ™•์‹คํžˆ ์„ค์น˜ํ•˜๊ณ  `enable_xformers_memory_efficient_attention`์„ ์‚ฌ์šฉํ•˜์„ธ์š”. ## 8GB GPU์—์„œ ํ•™์Šตํ•˜๊ธฐ ์šฐ๋ฆฌ๋Š” ControlNet์„ ์ง€์›ํ•˜๊ธฐ ์œ„ํ•œ DeepSpeed๋ฅผ ์ฒ ์ €ํ•˜๊ฒŒ ํ…Œ์ŠคํŠธํ•˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ํ™˜๊ฒฝ์„ค์ •์ด ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์ €์žฅํ•  ๋•Œ, ๊ทธ ํ™˜๊ฒฝ์ด ์„ฑ๊ณต์ ์œผ๋กœ ํ•™์Šตํ–ˆ๋Š”์ง€๋ฅผ ํ™•์ •ํ•˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. ์„ฑ๊ณตํ•œ ํ•™์Šต ์‹คํ–‰์„ ์œ„ํ•ด ์„ค์ •์„ ๋ณ€๊ฒฝํ•ด์•ผ ํ•  ๊ฐ€๋Šฅ์„ฑ์ด ๋†’์Šต๋‹ˆ๋‹ค. 8GB GPU์—์„œ ์‹คํ–‰ํ•˜๊ธฐ ์œ„ํ•ด ๋‹ค์Œ์˜ ์ตœ์ ํ™”๋ฅผ ์ง„ํ–‰ํ•˜์„ธ์š”: - ๊ธฐ์šธ๊ธฐ ์ฒดํฌํฌ์ธํŠธ ์ €์žฅํ•˜๊ธฐ - bitsandbyte์˜ 8-bit [optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation)(๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜๋‹ค๋ฉด ๋งํฌ์— ์—ฐ๊ฒฐ๋œ ์„ค๋ช…์„œ๋ฅผ ๋ณด์„ธ์š”) - [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers)(๊ฐ€ ์„ค์น˜๋˜์ง€ ์•Š์•˜๋‹ค๋ฉด ๋งํฌ์— ์—ฐ๊ฒฐ๋œ ์„ค๋ช…์„œ๋ฅผ ๋ณด์„ธ์š”) - ๊ธฐ์šธ๊ธฐ๋ฅผ `None`์œผ๋กœ ์„ค์ • - DeepSpeed stage 2 ๋ณ€์ˆ˜์™€ optimizer ์—†์—๊ธฐ - fp16 ํ˜ผํ•ฉ ์ •๋ฐ€๋„(precision) [DeepSpeed](https://www.deepspeed.ai/)๋Š” CPU ๋˜๋Š” NVME๋กœ ํ…์„œ๋ฅผ VRAM์—์„œ ์˜คํ”„๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด์„œ ํ›จ์”ฌ ๋” ๋งŽ์€ RAM(์•ฝ 25 GB)๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. DeepSpeed stage 2๋ฅผ ํ™œ์„ฑํ™”ํ•˜๊ธฐ ์œ„ํ•ด์„œ `accelerate config`๋กœ ํ™˜๊ฒฝ์„ ๊ตฌ์„ฑํ•ด์•ผํ•ฉ๋‹ˆ๋‹ค. 
The configuration file should look like this:

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 4
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
```

<Tip>

See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.

</Tip>

Replacing the default Adam optimizer with DeepSpeed's Adam, `deepspeed.ops.adam.DeepSpeedCPUAdam`, gives a substantial speedup, but it requires a CUDA toolchain with the same version as PyTorch. 8-bit optimizers do not currently seem to be compatible with DeepSpeed.

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"

accelerate launch train_controlnet.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --train_batch_size=1 \
 --gradient_accumulation_steps=4 \
 --gradient_checkpointing \
 --enable_xformers_memory_efficient_attention \
 --set_grads_to_none \
 --mixed_precision fp16 \
 --push_to_hub
```

## Inference

The trained model can be run with the [`StableDiffusionControlNetPipeline`]. Set `base_model_path` and `controlnet_path` to the values of `--pretrained_model_name_or_path` and `--output_dir` that were passed to the training script, respectively.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch

base_model_path = "path to model"
controlnet_path = "path to controlnet"

controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)

# speed up the diffusion process with a faster scheduler and memory optimizations
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove the following line if xformers is not installed
pipe.enable_xformers_memory_efficient_attention()

pipe.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"

# generate the image
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]

image.save("./output.png")
```
hf_public_repos/diffusers/docs/source/ko/training/text_inversion.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Textual Inversion

[[open-in-colab]]

[Textual Inversion](https://arxiv.org/abs/2208.01618) is a technique for capturing novel concepts from a small number of example images. The technique was originally demonstrated with [Latent Diffusion](https://github.com/CompVis/latent-diffusion), but it has since been applied to other similar models such as [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion). The learned concepts can be used to better control the images generated by a text-to-image pipeline. The model learns new "words" in the embedding space of the text encoder, which are then used within text prompts for personalized image generation.

![Textual Inversion example](https://textual-inversion.github.io/static/images/editing/colorful_teapot.JPG)
<small>By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation <a href="https://github.com/rinongal/textual_inversion">(image source)</a>.</small>

This guide explains how to train a [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model with Textual Inversion. All of the Textual Inversion training scripts used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion); take a look there if you are interested in how things work under the hood.

<Tip>

There is a community-created collection of trained Textual Inversion models in the [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library). More concepts are added over time, making it a useful resource!

</Tip>

Before you begin, install the training dependencies:

```bash
pip install diffusers accelerate transformers
```

After the dependencies are installed, initialize a [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:

```bash
accelerate config
```

To set up a default 🤗Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell such as a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Finally, install [xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers) to reduce memory usage with memory-efficient attention. After installing xFormers, add the `--enable_xformers_memory_efficient_attention` argument to the training script.
Note that xFormers is not supported for Flax.

## Uploading the model to the Hub

To store your model on the Hub, add the following argument to the training script:

```bash
--push_to_hub
```

## Saving and loading checkpoints

It is a good idea to regularly save checkpoints of your model during training. This way, you can resume training from a saved checkpoint if training is interrupted for any reason. Pass the following argument to the training script to save the full training state as a checkpoint in a subfolder of `output_dir` every 500 steps:

```bash
--checkpointing_steps=500
```

To resume training from a saved checkpoint, pass the following argument to the training script along with the specific checkpoint to resume from:

```bash
--resume_from_checkpoint="checkpoint-1500"
```

## Fine-tuning

Download the [cat toy dataset](https://huggingface.co/datasets/diffusers/cat_toy_example) as the training dataset and store it in a directory. To use your own dataset, take a look at the [Create a dataset for training](https://huggingface.co/docs/diffusers/training/create_dataset) guide.

```py
from huggingface_hub import snapshot_download

local_dir = "./cat"
snapshot_download(
    "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes"
)
```

Assign the model's repository id (or a path to a directory containing the model weights) to the `MODEL_NAME` environment variable and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. Then assign the path to the directory containing the images to the `DATA_DIR` environment variable.

Now you can run the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates the following files and saves them to your repository:

- `learned_embeds.bin`
- `token_identifier.txt`
- `type_of_concept.txt`

<Tip>

💡 A full training run takes up to 1 hour on one V100 GPU. While you wait for training to finish, feel free to check out [how Textual Inversion works](https://huggingface.co/docs/diffusers/training/text_inversion#how-it-works) in the section below if you're curious!

</Tip>

<frameworkcontent>
<pt>
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="./cat"

accelerate launch textual_inversion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$DATA_DIR \
  --learnable_property="object" \
  --placeholder_token="<cat-toy>" --initializer_token="toy" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=5.0e-04 --scale_lr \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --output_dir="textual_inversion_cat" \
  --push_to_hub
```

<Tip>

💡 To increase how much the model can learn, you can also consider representing the placeholder token (`<cat-toy>`) with multiple embedding vectors instead of a single one. This trick can help the model better capture the style of more complex images (i.e., the concepts mentioned earlier).
๋ณต์ˆ˜์˜ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ ํ•™์Šต์„ ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด ๋‹ค์Œ ์˜ต์…˜์„ ์ „๋‹ฌํ•˜์‹ญ์‹œ์˜ค. ```bash --num_vectors=5 ``` </Tip> </pt> <jax> TPU์— ์•ก์„ธ์Šคํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ, [Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py)๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋” ๋น ๋ฅด๊ฒŒ ๋ชจ๋ธ์„ ํ•™์Šต์‹œ์ผœ๋ณด์„ธ์š”. (๋ฌผ๋ก  GPU์—์„œ๋„ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค.) ๋™์ผํ•œ ์„ค์ •์—์„œ Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” PyTorch ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ณด๋‹ค ์ตœ์†Œ 70% ๋” ๋นจ๋ผ์•ผ ํ•ฉ๋‹ˆ๋‹ค! โšก๏ธ ์‹œ์ž‘ํ•˜๊ธฐ ์•ž์„œ Flax์— ๋Œ€ํ•œ ์˜์กด์„ฑ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋“ค์„ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```bash pip install -U -r requirements_flax.txt ``` ๋ชจ๋ธ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID(๋˜๋Š” ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๊ฐ€ ํฌํ•จ๋œ ๋””๋ ‰ํ„ฐ๋ฆฌ ๊ฒฝ๋กœ)๋ฅผ `MODEL_NAME` ํ™˜๊ฒฝ ๋ณ€์ˆ˜์— ํ• ๋‹นํ•˜๊ณ , ํ•ด๋‹น ๊ฐ’์„ [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) ์ธ์ž์— ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฐ ๋‹ค์Œ [ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py)๋ฅผ ์‹œ์ž‘ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export DATA_DIR="./cat" python textual_inversion_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$DATA_DIR \ --learnable_property="object" \ --placeholder_token="<cat-toy>" --initializer_token="toy" \ --resolution=512 \ --train_batch_size=1 \ --max_train_steps=3000 \ --learning_rate=5.0e-04 --scale_lr \ --output_dir="textual_inversion_cat" \ --push_to_hub ``` </jax> </frameworkcontent> ### ์ค‘๊ฐ„ ๋กœ๊น… ๋ชจ๋ธ์˜ ํ•™์Šต ์ง„ํ–‰ ์ƒํ™ฉ์„ ์ถ”์ ํ•˜๋Š” ๋ฐ ๊ด€์‹ฌ์ด ์žˆ๋Š” ๊ฒฝ์šฐ, ํ•™์Šต ๊ณผ์ •์—์„œ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋ฅผ ์ €์žฅํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ์ถ”๊ฐ€ํ•˜์—ฌ ์ค‘๊ฐ„ ๋กœ๊น…์„ ํ™œ์„ฑํ™”ํ•ฉ๋‹ˆ๋‹ค. - `validation_prompt` : ์ƒ˜ํ”Œ์„ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋˜๋Š” ํ”„๋กฌํ”„ํŠธ(๊ธฐ๋ณธ๊ฐ’์€ `None`์œผ๋กœ ์„ค์ •๋˜๋ฉฐ, ์ด ๋•Œ ์ค‘๊ฐ„ ๋กœ๊น…์€ ๋น„ํ™œ์„ฑํ™”๋จ) - `num_validation_images` : ์ƒ์„ฑํ•  ์ƒ˜ํ”Œ ์ด๋ฏธ์ง€ ์ˆ˜ - `validation_steps` : `validation_prompt`๋กœ๋ถ€ํ„ฐ ์ƒ˜ํ”Œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๊ธฐ ์ „ ์Šคํ…์˜ ์ˆ˜ ```bash --validation_prompt="A <cat-toy> backpack" --num_validation_images=4 --validation_steps=100 ``` ## ์ถ”๋ก  ๋ชจ๋ธ์„ ํ•™์Šตํ•œ ํ›„์—๋Š”, ํ•ด๋‹น ๋ชจ๋ธ์„ [`StableDiffusionPipeline`]์„ ์‚ฌ์šฉํ•˜์—ฌ ์ถ”๋ก ์— ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. textual-inversion ์Šคํฌ๋ฆฝํŠธ๋Š” ๊ธฐ๋ณธ์ ์œผ๋กœ textual-inversion์„ ํ†ตํ•ด ์–ป์–ด์ง„ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋งŒ์„ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ํ•ด๋‹น ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋“ค์€ ํ…์ŠคํŠธ ์ธ์ฝ”๋”์˜ ์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ์— ์ถ”๊ฐ€๋˜์–ด ์žˆ์Šต์Šต๋‹ˆ๋‹ค. <frameworkcontent> <pt> <Tip> ๐Ÿ’ก ์ปค๋ฎค๋‹ˆํ‹ฐ๋Š” [sd-concepts-library](https://huggingface.co/sd-concepts-library) ๋ผ๋Š” ๋Œ€๊ทœ๋ชจ์˜ textual-inversion ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ๋งŒ๋“ค์—ˆ์Šต๋‹ˆ๋‹ค. textual-inversion ์ž„๋ฒ ๋”ฉ์„ ๋ฐ‘๋ฐ”๋‹ฅ๋ถ€ํ„ฐ ํ•™์Šตํ•˜๋Š” ๋Œ€์‹ , ํ•ด๋‹น ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์— ๋ณธ์ธ์ด ์ฐพ๋Š” textual-inversion ์ž„๋ฒ ๋”ฉ์ด ์ด๋ฏธ ์ถ”๊ฐ€๋˜์–ด ์žˆ์ง€ ์•Š์€์ง€๋ฅผ ํ™•์ธํ•˜๋Š” ๊ฒƒ๋„ ์ข‹์€ ๋ฐฉ๋ฒ•์ด ๋  ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค. </Tip> textual-inversion ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ์„ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ์œ„ํ•ด์„œ๋Š”, ๋จผ์ € ํ•ด๋‹น ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋ฅผ ํ•™์Šตํ•  ๋•Œ ์‚ฌ์šฉํ•œ ๋ชจ๋ธ์„ ๋ถˆ๋Ÿฌ์™€์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์„œ๋Š” [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/docs/diffusers/training/runwayml/stable-diffusion-v1-5) ๋ชจ๋ธ์ด ์‚ฌ์šฉ๋˜์—ˆ๋‹ค๊ณ  ๊ฐ€์ •ํ•˜๊ณ  ๋ถˆ๋Ÿฌ์˜ค๊ฒ ์Šต๋‹ˆ๋‹ค. 
```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
```

๋‹ค์Œ์œผ๋กœ `TextualInversionLoaderMixin.load_textual_inversion` ํ•จ์ˆ˜๋ฅผ ํ†ตํ•ด textual-inversion ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋ฅผ ๋ถˆ๋Ÿฌ์™€์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์„œ๋Š” ์ด์ „ `<cat-toy>` ์˜ˆ์ œ์˜ ์ž„๋ฒ ๋”ฉ์„ ๋ถˆ๋Ÿฌ์˜ฌ ๊ฒƒ์ž…๋‹ˆ๋‹ค.

```python
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
```

์ด์ œ ํ”Œ๋ ˆ์ด์Šคํ™€๋” ํ† ํฐ(`<cat-toy>`)์ด ์ž˜ ๋™์ž‘ํ•˜๋Š”์ง€ ํ™•์ธํ•˜๋Š” ํŒŒ์ดํ”„๋ผ์ธ์„ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50).images[0]
image.save("cat-backpack.png")
```

`TextualInversionLoaderMixin.load_textual_inversion`์€ Diffusers ํ˜•์‹์œผ๋กœ ์ €์žฅ๋œ ํ…์ŠคํŠธ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์„ ๋ฟ๋งŒ ์•„๋‹ˆ๋ผ, [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) ํ˜•์‹์œผ๋กœ ์ €์žฅ๋œ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋„ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋ ‡๊ฒŒ ํ•˜๋ ค๋ฉด, ๋จผ์ € [civitAI](https://civitai.com/models/3036?modelVersionId=8387)์—์„œ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๋ฅผ ๋‹ค์šด๋กœ๋“œํ•œ ๋‹ค์Œ ๋กœ์ปฌ์—์„œ ๋ถˆ๋Ÿฌ์™€์•ผ ํ•ฉ๋‹ˆ๋‹ค.

```python
pipe.load_textual_inversion("./charturnerv2.pt")
```
</pt>
<jax>
ํ˜„์žฌ Flax์—๋Š” `load_textual_inversion` ํ•จ์ˆ˜๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ํ•™์Šต ํ›„ textual-inversion ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ๊ฐ€ ๋ชจ๋ธ์˜ ์ผ๋ถ€๋กœ ์ €์žฅ๋˜์—ˆ๋Š”์ง€ ํ™•์ธํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฐ ๋‹ค์Œ ๋‹ค๋ฅธ Flax ๋ชจ๋ธ๊ณผ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path-to-your-trained-model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "A <cat-toy> backpack"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("cat-backpack.png")
```
</jax>
</frameworkcontent>

## ์ž‘๋™ ๋ฐฉ์‹

![Diagram from the paper showing overview](https://textual-inversion.github.io/static/images/training/training.JPG)
<small>Architecture overview from the Textual Inversion <a href="https://textual-inversion.github.io/">blog post.</a></small>

์ผ๋ฐ˜์ ์œผ๋กœ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋Š” ๋ชจ๋ธ์— ์ „๋‹ฌ๋˜๊ธฐ ์ „์— ํ† ํฐํ™”๋˜์–ด ์ž„๋ฒ ๋”ฉ์œผ๋กœ ๋ณ€ํ™˜๋ฉ๋‹ˆ๋‹ค. textual-inversion๋„ ๋น„์Šทํ•œ ์ž‘์—…์„ ์ˆ˜ํ–‰ํ•˜์ง€๋งŒ, ์œ„ ๋‹ค์ด์–ด๊ทธ๋žจ์˜ ํŠน์ˆ˜ ํ† ํฐ `S*`๋กœ๋ถ€ํ„ฐ ์ƒˆ๋กœ์šด ํ† ํฐ ์ž„๋ฒ ๋”ฉ `v*`๋ฅผ ํ•™์Šตํ•ฉ๋‹ˆ๋‹ค. ๋ชจ๋ธ์˜ ์•„์›ƒํ’‹์€ ๋””ํ“จ์ „ ๋ชจ๋ธ์„ ์กฐ์ •ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋˜๋ฉฐ, ๋””ํ“จ์ „ ๋ชจ๋ธ์ด ๋‹จ ๋ช‡ ๊ฐœ์˜ ์˜ˆ์ œ ์ด๋ฏธ์ง€๋งŒ์œผ๋กœ ์ƒˆ๋กœ์šด ์ฝ˜์…‰ํŠธ๋ฅผ ์‹ ์†ํ•˜๊ฒŒ ์ดํ•ดํ•˜๋„๋ก ๋•์Šต๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด textual-inversion์€ ์ œ๋„ˆ๋ ˆ์ดํ„ฐ ๋ชจ๋ธ๊ณผ ํ•™์Šต์šฉ ์ด๋ฏธ์ง€์˜ ๋…ธ์ด์ฆˆ ๋ฒ„์ „์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์ œ๋„ˆ๋ ˆ์ดํ„ฐ๋Š” ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ๋ฒ„์ „์˜ ์ด๋ฏธ์ง€๋ฅผ ์˜ˆ์ธกํ•˜๋ ค๊ณ  ์‹œ๋„ํ•˜๋ฉฐ, ํ† ํฐ ์ž„๋ฒ ๋”ฉ `v*`๋Š” ์ œ๋„ˆ๋ ˆ์ดํ„ฐ์˜ ์„ฑ๋Šฅ์— ๋”ฐ๋ผ ์ตœ์ ํ™”๋ฉ๋‹ˆ๋‹ค.
ํ† ํฐ ์ž„๋ฒ ๋”ฉ์ด ์ƒˆ๋กœ์šด ์ฝ˜์…‰ํŠธ๋ฅผ ์„ฑ๊ณต์ ์œผ๋กœ ํฌ์ฐฉํ•˜๋ฉด ๋””ํ“จ์ „ ๋ชจ๋ธ์— ๋” ์œ ์šฉํ•œ ์ •๋ณด๋ฅผ ์ œ๊ณตํ•˜๊ณ  ๋…ธ์ด์ฆˆ๊ฐ€ ์ ์€ ๋” ์„ ๋ช…ํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋ฉ๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ์ตœ์ ํ™” ํ”„๋กœ์„ธ์Šค๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ ๋‹ค์–‘ํ•œ ํ”„๋กฌํ”„ํŠธ์™€ ์ด๋ฏธ์ง€์— ์ˆ˜์ฒœ ๋ฒˆ์— ๋…ธ์ถœ๋จ์œผ๋กœ์จ ์ด๋ฃจ์–ด์ง‘๋‹ˆ๋‹ค.
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/distributed_inference.md
# ์—ฌ๋Ÿฌ GPU๋ฅผ ์‚ฌ์šฉํ•œ ๋ถ„์‚ฐ ์ถ”๋ก 

๋ถ„์‚ฐ ์„ค์ •์—์„œ๋Š” ์—ฌ๋Ÿฌ ๊ฐœ์˜ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋™์‹œ์— ์ƒ์„ฑํ•  ๋•Œ ์œ ์šฉํ•œ 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) ๋˜๋Š” [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html)๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์—ฌ๋Ÿฌ GPU์—์„œ ์ถ”๋ก ์„ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” ๋ถ„์‚ฐ ์ถ”๋ก ์„ ์œ„ํ•ด 🤗 Accelerate์™€ PyTorch Distributed๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ๋“œ๋ฆฝ๋‹ˆ๋‹ค.

## 🤗 Accelerate

🤗 [Accelerate](https://huggingface.co/docs/accelerate/index)๋Š” ๋ถ„์‚ฐ ์„ค์ •์—์„œ ์ถ”๋ก ์„ ์‰ฝ๊ฒŒ ํ›ˆ๋ จํ•˜๊ฑฐ๋‚˜ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ๋„๋ก ์„ค๊ณ„๋œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์ž…๋‹ˆ๋‹ค. ๋ถ„์‚ฐ ํ™˜๊ฒฝ ์„ค์ • ํ”„๋กœ์„ธ์Šค๋ฅผ ๊ฐ„์†Œํ™”ํ•˜์—ฌ PyTorch ์ฝ”๋“œ์— ์ง‘์ค‘ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•ด์ค๋‹ˆ๋‹ค.

์‹œ์ž‘ํ•˜๋ ค๋ฉด Python ํŒŒ์ผ์„ ์ƒ์„ฑํ•˜๊ณ  [`accelerate.PartialState`]๋ฅผ ์ดˆ๊ธฐํ™”ํ•˜์—ฌ ๋ถ„์‚ฐ ํ™˜๊ฒฝ์„ ์ƒ์„ฑํ•˜๋ฉด, ์„ค์ •์ด ์ž๋™์œผ๋กœ ๊ฐ์ง€๋˜๋ฏ€๋กœ `rank` ๋˜๋Š” `world_size`๋ฅผ ๋ช…์‹œ์ ์œผ๋กœ ์ •์˜ํ•  ํ•„์š”๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค. [`DiffusionPipeline`]์„ `distributed_state.device`๋กœ ์ด๋™ํ•˜์—ฌ ๊ฐ ํ”„๋กœ์„ธ์Šค์— GPU๋ฅผ ํ• ๋‹นํ•ฉ๋‹ˆ๋‹ค.

์ด์ œ ์ปจํ…์ŠคํŠธ ๊ด€๋ฆฌ์ž๋กœ [`~accelerate.PartialState.split_between_processes`] ์œ ํ‹ธ๋ฆฌํ‹ฐ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํ”„๋กœ์„ธ์Šค ์ˆ˜์— ๋”ฐ๋ผ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ž๋™์œผ๋กœ ๋ถ„๋ฐฐํ•ฉ๋‹ˆ๋‹ค.

```py
import torch
from accelerate import PartialState
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipeline.to(distributed_state.device)

with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
    result = pipeline(prompt).images[0]
    result.save(f"result_{distributed_state.process_index}.png")
```

์‚ฌ์šฉํ•  GPU ์ˆ˜๋Š” `--num_processes` ์ธ์ž๋กœ ์ง€์ •ํ•˜๊ณ , `accelerate launch`๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค:

```bash
accelerate launch run_distributed.py --num_processes=2
```

<Tip>

์ž์„ธํ•œ ๋‚ด์šฉ์€ [🤗 Accelerate๋ฅผ ์‚ฌ์šฉํ•œ ๋ถ„์‚ฐ ์ถ”๋ก ](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

</Tip>

## PyTorch ๋ถ„์‚ฐ

PyTorch๋Š” ๋ฐ์ดํ„ฐ ๋ณ‘๋ ฌ ์ฒ˜๋ฆฌ๋ฅผ ๊ฐ€๋Šฅํ•˜๊ฒŒ ํ•˜๋Š” [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค.

์‹œ์ž‘ํ•˜๋ ค๋ฉด Python ํŒŒ์ผ์„ ์ƒ์„ฑํ•˜๊ณ  `torch.distributed` ๋ฐ `torch.multiprocessing`์„ ์ž„ํฌํŠธํ•˜์—ฌ ๋ถ„์‚ฐ ํ”„๋กœ์„ธ์Šค ๊ทธ๋ฃน์„ ์„ค์ •ํ•˜๊ณ  ๊ฐ GPU์—์„œ ์ถ”๋ก ์šฉ ํ”„๋กœ์„ธ์Šค๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ๊ทธ๋ฆฌ๊ณ  [`DiffusionPipeline`]๋„ ์ดˆ๊ธฐํ™”ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋””ํ“จ์ „ ํŒŒ์ดํ”„๋ผ์ธ์„ `rank`๋กœ ์ด๋™ํ•˜๊ณ  `get_rank`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๊ฐ ํ”„๋กœ์„ธ์Šค์— GPU๋ฅผ ํ• ๋‹นํ•˜๋ฉด ๊ฐ ํ”„๋กœ์„ธ์Šค๊ฐ€ ๋‹ค๋ฅธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ฒ˜๋ฆฌํ•ฉ๋‹ˆ๋‹ค:

```py
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

from diffusers import DiffusionPipeline

sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```

์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋ ค๋ฉด, ์‚ฌ์šฉํ•  ๋ฐฑ์—”๋“œ ์œ ํ˜•, ํ˜„์žฌ ํ”„๋กœ์„ธ์Šค์˜ `rank`, ์ฐธ์—ฌํ•˜๋Š” ํ”„๋กœ์„ธ์Šค ์ˆ˜(`world_size`)๋ฅผ ๋ฐ›์•„ [`init_process_group`]์œผ๋กœ ๋ถ„์‚ฐ ํ™˜๊ฒฝ ์ƒ์„ฑ์„ ์ฒ˜๋ฆฌํ•˜๋Š” ํ•จ์ˆ˜๋ฅผ ๋งŒ๋“ค์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 2๊ฐœ์˜ GPU์—์„œ ์ถ”๋ก ์„ ๋ณ‘๋ ฌ๋กœ ์‹คํ–‰ํ•˜๋Š” ๊ฒฝ์šฐ `world_size`๋Š” 2์ž…๋‹ˆ๋‹ค.
```py
def run_inference(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

    sd.to(rank)

    if torch.distributed.get_rank() == 0:
        prompt = "a dog"
    elif torch.distributed.get_rank() == 1:
        prompt = "a cat"

    image = sd(prompt).images[0]
    # ํ”„๋กฌํ”„ํŠธ์˜ ๊ณต๋ฐฑ์„ ๋ฐ‘์ค„๋กœ ๋ฐ”๊ฟ” ํŒŒ์ผ ์ด๋ฆ„์œผ๋กœ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.
    image.save(f"./{prompt.replace(' ', '_')}.png")
```

๋ถ„์‚ฐ ์ถ”๋ก ์„ ์‹คํ–‰ํ•˜๋ ค๋ฉด [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn)์„ ํ˜ธ์ถœํ•˜์—ฌ `world_size`์— ์ •์˜๋œ GPU ์ˆ˜๋งŒํผ `run_inference` ํ•จ์ˆ˜๋ฅผ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค:

```py
def main():
    world_size = 2
    mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)


if __name__ == "__main__":
    main()
```

์ถ”๋ก  ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์™„์„ฑํ–ˆ์œผ๋ฉด `--nproc_per_node` ์ธ์ˆ˜๋กœ ์‚ฌ์šฉํ•  GPU ์ˆ˜๋ฅผ ์ง€์ •ํ•˜๊ณ  `torchrun`์„ ํ˜ธ์ถœํ•˜์—ฌ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค:

```bash
torchrun run_distributed.py --nproc_per_node=2
```
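์œ„ ์˜ˆ์‹œ์˜ `if/elif` ๋ถ„๊ธฐ๋Š” GPU๊ฐ€ ๋Š˜์–ด๋‚˜๋ฉด ๊ทธ๋งŒํผ ๊ธธ์–ด์ง‘๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ํ”„๋กฌํ”„ํŠธ ๋ฆฌ์ŠคํŠธ๋ฅผ `rank`๋กœ ์ธ๋ฑ์‹ฑํ•ด ์ž„์˜ ๊ฐœ์ˆ˜์˜ ํ”„๋กœ์„ธ์Šค๋กœ ์ผ๋ฐ˜ํ™”ํ•œ, ๋ฌธ์„œ ์˜ˆ์ œ๋ฅผ ๋ณ€ํ˜•ํ•œ ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ ๋ชฉ๋ก๊ณผ ํŒŒ์ผ ์ด๋ฆ„์€ ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค.

```py
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

from diffusers import DiffusionPipeline

sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)

# ์ž„์˜๋กœ ์ •ํ•œ ์˜ˆ์‹œ ํ”„๋กฌํ”„ํŠธ ๋ชฉ๋ก: rank๋ฅผ ์ธ๋ฑ์Šค๋กœ ์จ์„œ ํ”„๋กœ์„ธ์Šค๋งˆ๋‹ค ๋‹ค๋ฅธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ํ• ๋‹นํ•ฉ๋‹ˆ๋‹ค.
PROMPTS = ["a dog", "a cat", "a horse", "a rabbit"]


def run_inference(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    sd.to(rank)

    image = sd(PROMPTS[rank % len(PROMPTS)]).images[0]
    image.save(f"./result_rank{rank}.png")


def main():
    world_size = 4  # ์‚ฌ์šฉํ•  GPU ์ˆ˜
    mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)


if __name__ == "__main__":
    main()
```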
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/text2image.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-to-image <Tip warning={true}> text-to-image ํŒŒ์ธํŠœ๋‹ ์Šคํฌ๋ฆฝํŠธ๋Š” experimental ์ƒํƒœ์ž…๋‹ˆ๋‹ค. ๊ณผ์ ํ•ฉํ•˜๊ธฐ ์‰ฝ๊ณ  ์น˜๋ช…์ ์ธ ๋ง๊ฐ๊ณผ ๊ฐ™์€ ๋ฌธ์ œ์— ๋ถ€๋”ชํžˆ๊ธฐ ์‰ฝ์Šต๋‹ˆ๋‹ค. ์ž์ฒด ๋ฐ์ดํ„ฐ์…‹์—์„œ ์ตœ์ƒ์˜ ๊ฒฐ๊ณผ๋ฅผ ์–ป์œผ๋ ค๋ฉด ๋‹ค์–‘ํ•œ ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ํƒ์ƒ‰ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. </Tip> Stable Diffusion๊ณผ ๊ฐ™์€ text-to-image ๋ชจ๋ธ์€ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ์—์„œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ๋Š” PyTorch ๋ฐ Flax๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ž์ฒด ๋ฐ์ดํ„ฐ์…‹์—์„œ [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) ๋ชจ๋ธ๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ์— ์‚ฌ์šฉ๋œ text-to-image ํŒŒ์ธํŠœ๋‹์„ ์œ„ํ•œ ๋ชจ๋“  ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๊ด€์‹ฌ์ด ์žˆ๋Š” ๊ฒฝ์šฐ ์ด [๋ฆฌํฌ์ง€ํ† ๋ฆฌ](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image)์—์„œ ์ž์„ธํžˆ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „์—, ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ ํ•™์Šต dependency๋“ค์„ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค: ```bash pip install git+https://github.com/huggingface/diffusers.git pip install -U -r requirements.txt ``` ๊ทธ๋ฆฌ๊ณ  [๐Ÿค—Accelerate](https://github.com/huggingface/accelerate/) ํ™˜๊ฒฝ์„ ์ดˆ๊ธฐํ™”ํ•ฉ๋‹ˆ๋‹ค: ```bash accelerate config ``` ๋ฆฌํฌ์ง€ํ† ๋ฆฌ๋ฅผ ์ด๋ฏธ ๋ณต์ œํ•œ ๊ฒฝ์šฐ, ์ด ๋‹จ๊ณ„๋ฅผ ์ˆ˜ํ–‰ํ•  ํ•„์š”๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค. ๋Œ€์‹ , ๋กœ์ปฌ ์ฒดํฌ์•„์›ƒ ๊ฒฝ๋กœ๋ฅผ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋ช…์‹œํ•  ์ˆ˜ ์žˆ์œผ๋ฉฐ ๊ฑฐ๊ธฐ์—์„œ ๋กœ๋“œ๋ฉ๋‹ˆ๋‹ค. ### ํ•˜๋“œ์›จ์–ด ์š”๊ตฌ ์‚ฌํ•ญ `gradient_checkpointing` ๋ฐ `mixed_precision`์„ ์‚ฌ์šฉํ•˜๋ฉด ๋‹จ์ผ 24GB GPU์—์„œ ๋ชจ๋ธ์„ ํŒŒ์ธํŠœ๋‹ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋” ๋†’์€ `batch_size`์™€ ๋” ๋น ๋ฅธ ํ›ˆ๋ จ์„ ์œ„ํ•ด์„œ๋Š” GPU ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ 30GB ์ด์ƒ์ธ GPU๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. TPU ๋˜๋Š” GPU์—์„œ ํŒŒ์ธํŠœ๋‹์„ ์œ„ํ•ด JAX๋‚˜ Flax๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ์ž์„ธํ•œ ๋‚ด์šฉ์€ [์•„๋ž˜](#flax-jax-finetuning)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. xFormers๋กœ memory efficient attention์„ ํ™œ์„ฑํ™”ํ•˜์—ฌ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰ ํ›จ์”ฌ ๋” ์ค„์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. [xFormers๊ฐ€ ์„ค์น˜](./optimization/xformers)๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜๊ณ  `--enable_xformers_memory_efficient_attention`๋ฅผ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋ช…์‹œํ•ฉ๋‹ˆ๋‹ค. xFormers๋Š” Flax์— ์‚ฌ์šฉํ•  ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค. ## Hub์— ๋ชจ๋ธ ์—…๋กœ๋“œํ•˜๊ธฐ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ์ถ”๊ฐ€ํ•˜์—ฌ ๋ชจ๋ธ์„ ํ—ˆ๋ธŒ์— ์ €์žฅํ•ฉ๋‹ˆ๋‹ค: ```bash --push_to_hub ``` ## ์ฒดํฌํฌ์ธํŠธ ์ €์žฅ ๋ฐ ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ํ•™์Šต ์ค‘ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ๋Š” ์ผ์— ๋Œ€๋น„ํ•˜์—ฌ ์ •๊ธฐ์ ์œผ๋กœ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ €์žฅํ•ด ๋‘๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ €์žฅํ•˜๋ ค๋ฉด ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ๋ช…์‹œํ•ฉ๋‹ˆ๋‹ค. ```bash --checkpointing_steps=500 ``` 500์Šคํ…๋งˆ๋‹ค ์ „์ฒด ํ•™์Šต state๊ฐ€ 'output_dir'์˜ ํ•˜์œ„ ํด๋”์— ์ €์žฅ๋ฉ๋‹ˆ๋‹ค. ์ฒดํฌํฌ์ธํŠธ๋Š” 'checkpoint-'์— ์ง€๊ธˆ๊นŒ์ง€ ํ•™์Šต๋œ step ์ˆ˜์ž…๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด 'checkpoint-1500'์€ 1500 ํ•™์Šต step ํ›„์— ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์ž…๋‹ˆ๋‹ค. 
ํ•™์Šต์„ ์žฌ๊ฐœํ•˜๊ธฐ ์œ„ํ•ด ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๋ ค๋ฉด '--resume_from_checkpoint' ์ธ์ˆ˜๋ฅผ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋ช…์‹œํ•˜๊ณ  ์žฌ๊ฐœํ•  ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ง€์ •ํ•˜์‹ญ์‹œ์˜ค. ์˜ˆ๋ฅผ ๋“ค์–ด ๋‹ค์Œ ์ธ์ˆ˜๋Š” 1500๊ฐœ์˜ ํ•™์Šต step ํ›„์— ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์—์„œ๋ถ€ํ„ฐ ํ›ˆ๋ จ์„ ์žฌ๊ฐœํ•ฉ๋‹ˆ๋‹ค. ```bash --resume_from_checkpoint="checkpoint-1500" ``` ## ํŒŒ์ธํŠœ๋‹ <frameworkcontent> <pt> ๋‹ค์Œ๊ณผ ๊ฐ™์ด [Pokรฉmon BLIP ์บก์…˜](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) ๋ฐ์ดํ„ฐ์…‹์—์„œ ํŒŒ์ธํŠœ๋‹ ์‹คํ–‰์„ ์œ„ํ•ด [PyTorch ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)๋ฅผ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export dataset_name="lambdalabs/pokemon-blip-captions" accelerate launch train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$dataset_name \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --mixed_precision="fp16" \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-pokemon-model" ``` ์ž์ฒด ๋ฐ์ดํ„ฐ์…‹์œผ๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๋ ค๋ฉด ๐Ÿค— [Datasets](https://huggingface.co/docs/datasets/index)์—์„œ ์š”๊ตฌํ•˜๋Š” ํ˜•์‹์— ๋”ฐ๋ผ ๋ฐ์ดํ„ฐ์…‹์„ ์ค€๋น„ํ•˜์„ธ์š”. [๋ฐ์ดํ„ฐ์…‹์„ ํ—ˆ๋ธŒ์— ์—…๋กœ๋“œ](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)ํ•˜๊ฑฐ๋‚˜ [ํŒŒ์ผ๋“ค์ด ์žˆ๋Š” ๋กœ์ปฌ ํด๋”๋ฅผ ์ค€๋น„](https ://huggingface.co/docs/datasets/image_dataset#imagefolder)ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‚ฌ์šฉ์ž ์ปค์Šคํ…€ loading logic์„ ์‚ฌ์šฉํ•˜๋ ค๋ฉด ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ˆ˜์ •ํ•˜์‹ญ์‹œ์˜ค. ๋„์›€์ด ๋˜๋„๋ก ์ฝ”๋“œ์˜ ์ ์ ˆํ•œ ์œ„์น˜์— ํฌ์ธํ„ฐ๋ฅผ ๋‚จ๊ฒผ์Šต๋‹ˆ๋‹ค. ๐Ÿค— ์•„๋ž˜ ์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ๋Š” `TRAIN_DIR`์˜ ๋กœ์ปฌ ๋ฐ์ดํ„ฐ์…‹์œผ๋กœ๋ฅผ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•๊ณผ `OUTPUT_DIR`์—์„œ ๋ชจ๋ธ์„ ์ €์žฅํ•  ์œ„์น˜๋ฅผ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export TRAIN_DIR="path_to_your_dataset" export OUTPUT_DIR="path_to_save_model" accelerate launch train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_data_dir=$TRAIN_DIR \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --mixed_precision="fp16" \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir=${OUTPUT_DIR} ``` </pt> <jax> [@duongna211](https://github.com/duongna21)์˜ ๊ธฐ์—ฌ๋กœ, Flax๋ฅผ ์‚ฌ์šฉํ•ด TPU ๋ฐ GPU์—์„œ Stable Diffusion ๋ชจ๋ธ์„ ๋” ๋น ๋ฅด๊ฒŒ ํ•™์Šตํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋Š” TPU ํ•˜๋“œ์›จ์–ด์—์„œ ๋งค์šฐ ํšจ์œจ์ ์ด์ง€๋งŒ GPU์—์„œ๋„ ํ›Œ๋ฅญํ•˜๊ฒŒ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” gradient checkpointing๋‚˜ gradient accumulation๊ณผ ๊ฐ™์€ ๊ธฐ๋Šฅ์„ ์•„์ง ์ง€์›ํ•˜์ง€ ์•Š์œผ๋ฏ€๋กœ ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ 30GB ์ด์ƒ์ธ GPU ๋˜๋Š” TPU v3๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „์— ์š”๊ตฌ ์‚ฌํ•ญ์ด ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์‹ญ์‹œ์˜ค: ```bash pip install -U -r requirements_flax.txt ``` ๊ทธ๋Ÿฌ๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™์ด [Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py)๋ฅผ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export dataset_name="lambdalabs/pokemon-blip-captions"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-pokemon-model"
```

์ž์ฒด ๋ฐ์ดํ„ฐ์…‹์œผ๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๋ ค๋ฉด 🤗 [Datasets](https://huggingface.co/docs/datasets/index)์—์„œ ์š”๊ตฌํ•˜๋Š” ํ˜•์‹์— ๋”ฐ๋ผ ๋ฐ์ดํ„ฐ์…‹์„ ์ค€๋น„ํ•˜์„ธ์š”. [๋ฐ์ดํ„ฐ์…‹์„ ํ—ˆ๋ธŒ์— ์—…๋กœ๋“œ](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub)ํ•˜๊ฑฐ๋‚˜ [ํŒŒ์ผ๋“ค์ด ์žˆ๋Š” ๋กœ์ปฌ ํด๋”๋ฅผ ์ค€๋น„](https://huggingface.co/docs/datasets/image_dataset#imagefolder)ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์‚ฌ์šฉ์ž ์ปค์Šคํ…€ loading logic์„ ์‚ฌ์šฉํ•˜๋ ค๋ฉด ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ˆ˜์ •ํ•˜์‹ญ์‹œ์˜ค. ๋„์›€์ด ๋˜๋„๋ก ์ฝ”๋“œ์˜ ์ ์ ˆํ•œ ์œ„์น˜์— ํฌ์ธํ„ฐ๋ฅผ ๋‚จ๊ฒผ์Šต๋‹ˆ๋‹ค. 🤗 ์•„๋ž˜ ์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ๋Š” `TRAIN_DIR`์˜ ๋กœ์ปฌ ๋ฐ์ดํ„ฐ์…‹์œผ๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค:

```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export TRAIN_DIR="path_to_your_dataset"

python train_text_to_image_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --train_data_dir=$TRAIN_DIR \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --mixed_precision="fp16" \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --output_dir="sd-pokemon-model"
```
</jax>
</frameworkcontent>

## LoRA

Text-to-image ๋ชจ๋ธ ํŒŒ์ธํŠœ๋‹์„ ์œ„ํ•ด, ๋Œ€๊ทœ๋ชจ ๋ชจ๋ธ ํ•™์Šต์„ ๊ฐ€์†ํ™”ํ•˜๊ธฐ ์œ„ํ•œ ํŒŒ์ธํŠœ๋‹ ๊ธฐ์ˆ ์ธ LoRA(Low-Rank Adaptation of Large Language Models)๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ž์„ธํ•œ ๋‚ด์šฉ์€ [LoRA ํ•™์Šต](lora#text-to-image) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”.

## ์ถ”๋ก 

ํ—ˆ๋ธŒ์˜ ๋ชจ๋ธ ๊ฒฝ๋กœ ๋˜๋Š” ๋ชจ๋ธ ์ด๋ฆ„์„ [`StableDiffusionPipeline`]์— ์ „๋‹ฌํ•˜์—ฌ ์ถ”๋ก ์„ ์œ„ํ•ด ํŒŒ์ธํŠœ๋‹๋œ ๋ชจ๋ธ์„ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

<frameworkcontent>
<pt>
```python
import torch
from diffusers import StableDiffusionPipeline

model_path = "path_to_saved_model"
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-pokemon.png")
```
</pt>
<jax>
```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path_to_saved_model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "yoda pokemon"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("yoda-pokemon.png")
```
</jax>
</frameworkcontent>
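`--push_to_hub`๋กœ ํ•™์Šต ๊ฒฐ๊ณผ๋ฅผ ํ—ˆ๋ธŒ์— ์˜ฌ๋ ธ๋‹ค๋ฉด, ๋กœ์ปฌ ๊ฒฝ๋กœ ๋Œ€์‹  ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID๋กœ๋„ ๊ฐ™์€ ๋ฐฉ์‹์œผ๋กœ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์ด๋ฅผ ๋ณด์—ฌ์ฃผ๋Š” ๊ฐ„๋‹จํ•œ ์Šค์ผ€์น˜๋กœ, `your-username/sd-pokemon-model`์€ ์ž„์˜๋กœ ์ •ํ•œ ๊ฐ€์ƒ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID์ž…๋‹ˆ๋‹ค.

```python
import torch
from diffusers import StableDiffusionPipeline

# ๊ฐ€์ •: --push_to_hub๋กœ ์—…๋กœ๋“œํ•œ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID (์ž„์˜ ์˜ˆ์‹œ)
pipe = StableDiffusionPipeline.from_pretrained("your-username/sd-pokemon-model", torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(prompt="yoda", num_inference_steps=50).images[0]
image.save("yoda-pokemon.png")
```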
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/instructpix2pix.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix [InstructPix2Pix](https://arxiv.org/abs/2211.09800)๋Š” text-conditioned diffusion ๋ชจ๋ธ์ด ํ•œ ์ด๋ฏธ์ง€์— ํŽธ์ง‘์„ ๋”ฐ๋ฅผ ์ˆ˜ ์žˆ๋„๋ก ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. ์ด ๋ฐฉ๋ฒ•์„ ์‚ฌ์šฉํ•˜์—ฌ ํŒŒ์ธํŠœ๋‹๋œ ๋ชจ๋ธ์€ ๋‹ค์Œ์„ ์ž…๋ ฅ์œผ๋กœ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/> </p> ์ถœ๋ ฅ์€ ์ž…๋ ฅ ์ด๋ฏธ์ง€์— ํŽธ์ง‘ ์ง€์‹œ๊ฐ€ ๋ฐ˜์˜๋œ "์ˆ˜์ •๋œ" ์ด๋ฏธ์ง€์ž…๋‹ˆ๋‹ค: <p align="center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/> </p> `train_instruct_pix2pix.py` ์Šคํฌ๋ฆฝํŠธ([์—ฌ๊ธฐ](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.)๋Š” ํ•™์Šต ์ ˆ์ฐจ๋ฅผ ์„ค๋ช…ํ•˜๊ณ  Stable Diffusion์— ์ ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. *** `train_instruct_pix2pix.py`๋Š” [์›๋ž˜ ๊ตฌํ˜„](https://github.com/timothybrooks/instruct-pix2pix)์— ์ถฉ์‹คํ•˜๋ฉด์„œ InstructPix2Pix ํ•™์Šต ์ ˆ์ฐจ๋ฅผ ๊ตฌํ˜„ํ•˜๊ณ  ์žˆ์ง€๋งŒ, [์†Œ๊ทœ๋ชจ ๋ฐ์ดํ„ฐ์…‹](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples)์—์„œ๋งŒ ํ…Œ์ŠคํŠธ๋ฅผ ํ–ˆ์Šต๋‹ˆ๋‹ค. ์ด๋Š” ์ตœ์ข… ๊ฒฐ๊ณผ์— ์˜ํ–ฅ์„ ๋ผ์น  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋” ๋‚˜์€ ๊ฒฐ๊ณผ๋ฅผ ์œ„ํ•ด, ๋” ํฐ ๋ฐ์ดํ„ฐ์…‹์—์„œ ๋” ๊ธธ๊ฒŒ ํ•™์Šตํ•˜๋Š” ๊ฒƒ์„ ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. [์—ฌ๊ธฐ](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered)์—์„œ InstructPix2Pix ํ•™์Šต์„ ์œ„ํ•ด ํฐ ๋ฐ์ดํ„ฐ์…‹์„ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. *** ## PyTorch๋กœ ๋กœ์ปฌ์—์„œ ์‹คํ–‰ํ•˜๊ธฐ ### ์ข…์†์„ฑ(dependencies) ์„ค์น˜ํ•˜๊ธฐ ์ด ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „์—, ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ ํ•™์Šต ์ข…์†์„ฑ์„ ์„ค์น˜ํ•˜์„ธ์š”: **์ค‘์š”** ์ตœ์‹  ๋ฒ„์ „์˜ ์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์„ฑ๊ณต์ ์œผ๋กœ ์‹คํ–‰ํ•˜๊ธฐ ์œ„ํ•ด, **์›๋ณธ์œผ๋กœ๋ถ€ํ„ฐ ์„ค์น˜**ํ•˜๋Š” ๊ฒƒ๊ณผ ์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ž์ฃผ ์—…๋ฐ์ดํŠธํ•˜๊ณ  ์˜ˆ์ œ๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ์„ ์„ค์น˜ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ์ตœ์‹  ์ƒํƒœ๋กœ ์œ ์ง€ํ•˜๋Š” ๊ฒƒ์„ ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด, ์ƒˆ๋กœ์šด ๊ฐ€์ƒ ํ™˜๊ฒฝ์—์„œ ๋‹ค์Œ ์Šคํ…์„ ์‹คํ–‰ํ•˜์„ธ์š”: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` cd ๋ช…๋ น์–ด๋กœ ์˜ˆ์ œ ํด๋”๋กœ ์ด๋™ํ•˜์„ธ์š”. ```bash cd examples/instruct_pix2pix ``` ์ด์ œ ์‹คํ–‰ํ•˜์„ธ์š”. ```bash pip install -r requirements.txt ``` ๊ทธ๋ฆฌ๊ณ  [๐Ÿค—Accelerate](https://github.com/huggingface/accelerate/) ํ™˜๊ฒฝ์—์„œ ์ดˆ๊ธฐํ™”ํ•˜์„ธ์š”: ```bash accelerate config ``` ํ˜น์€ ํ™˜๊ฒฝ์— ๋Œ€ํ•œ ์งˆ๋ฌธ ์—†์ด ๊ธฐ๋ณธ์ ์ธ accelerate ๊ตฌ์„ฑ์„ ์‚ฌ์šฉํ•˜๋ ค๋ฉด ๋‹ค์Œ์„ ์‹คํ–‰ํ•˜์„ธ์š”. ```bash accelerate config default ``` ํ˜น์€ ์‚ฌ์šฉ ์ค‘์ธ ํ™˜๊ฒฝ์ด notebook๊ณผ ๊ฐ™์€ ๋Œ€ํ™”ํ˜• ์‰˜์€ ์ง€์›ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ๋Š” ๋‹ค์Œ ์ ˆ์ฐจ๋ฅผ ๋”ฐ๋ผ์ฃผ์„ธ์š”. 
```python from accelerate.utils import write_basic_config write_basic_config() ``` ### ์˜ˆ์‹œ ์ด์ „์— ์–ธ๊ธ‰ํ–ˆ๋“ฏ์ด, ํ•™์Šต์„ ์œ„ํ•ด [์ž‘์€ ๋ฐ์ดํ„ฐ์…‹](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples)์„ ์‚ฌ์šฉํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๊ทธ ๋ฐ์ดํ„ฐ์…‹์€ InstructPix2Pix ๋…ผ๋ฌธ์—์„œ ์‚ฌ์šฉ๋œ [์›๋ž˜์˜ ๋ฐ์ดํ„ฐ์…‹](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered)๋ณด๋‹ค ์ž‘์€ ๋ฒ„์ „์ž…๋‹ˆ๋‹ค. ์ž์‹ ์˜ ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด, [ํ•™์Šต์„ ์œ„ํ•œ ๋ฐ์ดํ„ฐ์…‹ ๋งŒ๋“ค๊ธฐ](create_dataset) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š”. `MODEL_NAME` ํ™˜๊ฒฝ ๋ณ€์ˆ˜(ํ—ˆ๋ธŒ ๋ชจ๋ธ ๋ ˆํฌ์ง€ํ† ๋ฆฌ ๋˜๋Š” ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๊ฐ€ ํฌํ•จ๋œ ํด๋” ๊ฒฝ๋กœ)๋ฅผ ์ง€์ •ํ•˜๊ณ  [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) ์ธ์ˆ˜์— ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค. `DATASET_ID`์— ๋ฐ์ดํ„ฐ์…‹ ์ด๋ฆ„์„ ์ง€์ •ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค: ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATASET_ID="fusing/instructpix2pix-1000-samples" ``` ์ง€๊ธˆ, ํ•™์Šต์„ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์Šคํฌ๋ฆฝํŠธ๋Š” ๋ ˆํฌ์ง€ํ† ๋ฆฌ์˜ ํ•˜์œ„ ํด๋”์˜ ๋ชจ๋“  ๊ตฌ์„ฑ์š”์†Œ(`feature_extractor`, `scheduler`, `text_encoder`, `unet` ๋“ฑ)๋ฅผ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` ์ถ”๊ฐ€์ ์œผ๋กœ, ๊ฐ€์ค‘์น˜์™€ ๋ฐ”์ด์–ด์Šค๋ฅผ ํ•™์Šต ๊ณผ์ •์— ๋ชจ๋‹ˆํ„ฐ๋งํ•˜์—ฌ ๊ฒ€์ฆ ์ถ”๋ก ์„ ์ˆ˜ํ–‰ํ•˜๋Š” ๊ฒƒ์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. `report_to="wandb"`์™€ ์ด ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \ --validation_prompt="make the mountains snowy" \ --seed=42 \ --report_to=wandb \ --push_to_hub ``` ๋ชจ๋ธ ๋””๋ฒ„๊น…์— ์œ ์šฉํ•œ ์ด ํ‰๊ฐ€ ๋ฐฉ๋ฒ• ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. ์ด๋ฅผ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด `wandb`๋ฅผ ์„ค์น˜ํ•˜๋Š” ๊ฒƒ์„ ์ฃผ๋ชฉํ•ด์ฃผ์„ธ์š”. `pip install wandb`๋กœ ์‹คํ–‰ํ•ด `wandb`๋ฅผ ์„ค์น˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. [์—ฌ๊ธฐ](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), ๋ช‡ ๊ฐ€์ง€ ํ‰๊ฐ€ ๋ฐฉ๋ฒ•๊ณผ ํ•™์Šต ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ํฌํ•จํ•˜๋Š” ์˜ˆ์‹œ๋ฅผ ๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ***์ฐธ๊ณ : ์›๋ณธ ๋…ผ๋ฌธ์—์„œ, ์ €์ž๋“ค์€ 256x256 ์ด๋ฏธ์ง€ ํ•ด์ƒ๋„๋กœ ํ•™์Šตํ•œ ๋ชจ๋ธ๋กœ 512x512์™€ ๊ฐ™์€ ๋” ํฐ ํ•ด์ƒ๋„๋กœ ์ž˜ ์ผ๋ฐ˜ํ™”๋˜๋Š” ๊ฒƒ์„ ๋ณผ ์ˆ˜ ์žˆ์—ˆ์Šต๋‹ˆ๋‹ค. ์ด๋Š” ํ•™์Šต์— ์‚ฌ์šฉํ•œ ํฐ ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ–ˆ๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค.*** ## ๋‹ค์ˆ˜์˜ GPU๋กœ ํ•™์Šตํ•˜๊ธฐ `accelerate`๋Š” ์›ํ™œํ•œ ๋‹ค์ˆ˜์˜ GPU๋กœ ํ•™์Šต์„ ๊ฐ€๋Šฅํ•˜๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค. 
`accelerate`๋กœ ๋ถ„์‚ฐ ํ•™์Šต์„ ์‹คํ–‰ํ•˜๋ ค๋ฉด [์—ฌ๊ธฐ](https://huggingface.co/docs/accelerate/basic_tutorials/launch) ์„ค๋ช…์„ ๋”ฐ๋ผ ์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์˜ˆ์‹œ ๋ช…๋ น์–ด์ž…๋‹ˆ๋‹ค:

```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \
 --pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
 --dataset_name=sayakpaul/instructpix2pix-1000-samples \
 --use_ema \
 --enable_xformers_memory_efficient_attention \
 --resolution=512 --random_flip \
 --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
 --max_train_steps=15000 \
 --checkpointing_steps=5000 --checkpoints_total_limit=1 \
 --learning_rate=5e-05 --lr_warmup_steps=0 \
 --conditioning_dropout_prob=0.05 \
 --mixed_precision=fp16 \
 --seed=42 \
 --push_to_hub
```

## ์ถ”๋ก ํ•˜๊ธฐ

ํ•™์Šต์ด ์™„๋ฃŒ๋˜๋ฉด ์ถ”๋ก ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```python
import requests
import torch
from PIL import Image, ImageOps
from diffusers import StableDiffusionInstructPix2PixPipeline

model_id = "your_model_id"  # <- ์ด๋ฅผ ์ˆ˜์ •ํ•˜์„ธ์š”.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)

url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"


def download_image(url):
    image = Image.open(requests.get(url, stream=True).raw)
    image = ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


image = download_image(url)

prompt = "wipe out the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10

edited_image = pipe(
    prompt,
    image=image,
    num_inference_steps=num_inference_steps,
    image_guidance_scale=image_guidance_scale,
    guidance_scale=guidance_scale,
    generator=generator,
).images[0]
edited_image.save("edited_image.png")
```

ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‚ฌ์šฉํ•ด ์–ป์€ ์˜ˆ์‹œ ๋ชจ๋ธ ๋ ˆํฌ์ง€ํ† ๋ฆฌ๋Š” [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix)์—์„œ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์ƒ์„ฑ ์†๋„์™€ ํ’ˆ์งˆ์„ ์ œ์–ดํ•˜๊ธฐ ์œ„ํ•ด ์„ธ ๊ฐ€์ง€ ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค:

* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`

ํŠนํžˆ `image_guidance_scale`๊ณผ `guidance_scale`์€ ์ƒ์„ฑ๋œ("์ˆ˜์ •๋œ") ์ด๋ฏธ์ง€์— ํฐ ์˜ํ–ฅ์„ ๋ฏธ์น  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค([์—ฌ๊ธฐ](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) ์˜ˆ์‹œ๋ฅผ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”).

InstructPix2Pix ํ•™์Šต ๋ฐฉ๋ฒ•์˜ ํฅ๋ฏธ๋กœ์šด ํ™œ์šฉ ๋ฐฉ๋ฒ•์„ ์ฐพ๊ณ  ์žˆ๋‹ค๋ฉด, [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) ๋ธ”๋กœ๊ทธ ๊ฒŒ์‹œ๋ฌผ์„ ํ™•์ธํ•ด์ฃผ์„ธ์š”.
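์„ธ ํŒŒ๋ผ๋ฏธํ„ฐ์˜ ์˜ํ–ฅ์„ ์ง์ ‘ ๋น„๊ตํ•ด๋ณด๊ณ  ์‹ถ๋‹ค๋ฉด, ์•„๋ž˜์™€ ๊ฐ™์ด ๊ฐ’์„ ๋ฐ”๊ฟ”๊ฐ€๋ฉฐ ๊ฒฐ๊ณผ๋ฅผ ์ €์žฅํ•ด๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์œ„ ์ถ”๋ก  ์˜ˆ์ œ์˜ `pipe`, `image`, `generator`๊ฐ€ ์ด๋ฏธ ์ค€๋น„๋˜์–ด ์žˆ๋‹ค๊ณ  ๊ฐ€์ •ํ•œ ์Šค์ผ€์น˜์ด๋ฉฐ, ๊ฐ’์˜ ๋ฒ”์œ„๋Š” ์ž„์˜๋กœ ๊ณ ๋ฅธ ์˜ˆ์‹œ์ž…๋‹ˆ๋‹ค.

```python
# ๊ฐ€์ •: ์œ„ ์ถ”๋ก  ์˜ˆ์ œ์˜ pipe, image, generator๊ฐ€ ์ด๋ฏธ ๋งŒ๋“ค์–ด์ ธ ์žˆ๋Š” ์ƒํƒœ์ž…๋‹ˆ๋‹ค.
prompt = "wipe out the lake"

# image_guidance_scale์ด ํด์ˆ˜๋ก ์ž…๋ ฅ ์ด๋ฏธ์ง€์— ๊ฐ€๊น๊ฒŒ, guidance_scale์ด ํด์ˆ˜๋ก
# ํŽธ์ง‘ ์ง€์‹œ์— ๊ฐ€๊น๊ฒŒ ์ƒ์„ฑ๋˜๋Š” ๊ฒฝํ–ฅ์ด ์žˆ์Šต๋‹ˆ๋‹ค.
for igs in [1.0, 1.5, 2.0]:  # ์ž„์˜๋กœ ๊ณ ๋ฅธ ์˜ˆ์‹œ ๊ฐ’
    edited = pipe(
        prompt,
        image=image,
        num_inference_steps=20,
        image_guidance_scale=igs,
        guidance_scale=10,
        generator=generator,
    ).images[0]
    edited.save(f"edited_igs_{igs}.png")
```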
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ๐Ÿงจ Diffusers ํ•™์Šต ์˜ˆ์‹œ ์ด๋ฒˆ ์ฑ•ํ„ฐ์—์„œ๋Š” ๋‹ค์–‘ํ•œ ์œ ์ฆˆ์ผ€์ด์Šค๋“ค์— ๋Œ€ํ•œ ์˜ˆ์ œ ์ฝ”๋“œ๋“ค์„ ํ†ตํ•ด ์–ด๋–ป๊ฒŒํ•˜๋ฉด ํšจ๊ณผ์ ์œผ๋กœ `diffusers` ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์„๊นŒ์— ๋Œ€ํ•ด ์•Œ์•„๋ณด๋„๋ก ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. **Note**: ํ˜น์‹œ ์˜คํ”ผ์…œํ•œ ์˜ˆ์‹œ์ฝ”๋“œ๋ฅผ ์ฐพ๊ณ  ์žˆ๋‹ค๋ฉด, [์—ฌ๊ธฐ](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)๋ฅผ ์ฐธ๊ณ ํ•ด๋ณด์„ธ์š”! ์—ฌ๊ธฐ์„œ ๋‹ค๋ฃฐ ์˜ˆ์‹œ๋“ค์€ ๋‹ค์Œ์„ ์ง€ํ–ฅํ•ฉ๋‹ˆ๋‹ค. - **์†์‰ฌ์šด ๋””ํŽœ๋˜์‹œ ์„ค์น˜** (Self-contained) : ์—ฌ๊ธฐ์„œ ์‚ฌ์šฉ๋  ์˜ˆ์‹œ ์ฝ”๋“œ๋“ค์˜ ๋””ํŽœ๋˜์‹œ ํŒจํ‚ค์ง€๋“ค์€ ์ „๋ถ€ `pip install` ๋ช…๋ น์–ด๋ฅผ ํ†ตํ•ด ์„ค์น˜ ๊ฐ€๋Šฅํ•œ ํŒจํ‚ค์ง€๋“ค์ž…๋‹ˆ๋‹ค. ๋˜ํ•œ ์นœ์ ˆํ•˜๊ฒŒ `requirements.txt` ํŒŒ์ผ์— ํ•ด๋‹น ํŒจํ‚ค์ง€๋“ค์ด ๋ช…์‹œ๋˜์–ด ์žˆ์–ด, `pip install -r requirements.txt`๋กœ ๊ฐ„ํŽธํ•˜๊ฒŒ ํ•ด๋‹น ๋””ํŽœ๋˜์‹œ๋“ค์„ ์„ค์น˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ์‹œ: [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) - **์†์‰ฌ์šด ์ˆ˜์ •** (Easy-to-tweak) : ์ €ํฌ๋Š” ๊ฐ€๋Šฅํ•˜๋ฉด ๋งŽ์€ ์œ ์ฆˆ ์ผ€์ด์Šค๋“ค์„ ์ œ๊ณตํ•˜๊ณ ์ž ํ•ฉ๋‹ˆ๋‹ค. ํ•˜์ง€๋งŒ ์˜ˆ์‹œ๋Š” ๊ฒฐ๊ตญ ๊ทธ์ € ์˜ˆ์‹œ๋ผ๋Š” ์ ๋“ค ๊ธฐ์–ตํ•ด์ฃผ์„ธ์š”. ์—ฌ๊ธฐ์„œ ์ œ๊ณต๋˜๋Š” ์˜ˆ์‹œ์ฝ”๋“œ๋“ค์„ ๊ทธ์ € ๋‹จ์ˆœํžˆ ๋ณต์‚ฌ-๋ถ™ํ˜€๋„ฃ๊ธฐํ•˜๋Š” ์‹์œผ๋กœ๋Š” ์—ฌ๋Ÿฌ๋ถ„์ด ๋งˆ์ฃผํ•œ ๋ฌธ์ œ๋“ค์„ ์†์‰ฝ๊ฒŒ ํ•ด๊ฒฐํ•  ์ˆœ ์—†์„ ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋‹ค์‹œ ๋งํ•ด ์–ด๋А ์ •๋„๋Š” ์—ฌ๋Ÿฌ๋ถ„์˜ ์ƒํ™ฉ๊ณผ ๋‹ˆ์ฆˆ์— ๋งž์ถฐ ์ฝ”๋“œ๋ฅผ ์ผ์ • ๋ถ€๋ถ„ ๊ณ ์ณ๋‚˜๊ฐ€์•ผ ํ•  ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ๋Œ€๋ถ€๋ถ„์˜ ํ•™์Šต ์˜ˆ์‹œ๋“ค์€ ๋ฐ์ดํ„ฐ์˜ ์ „์ฒ˜๋ฆฌ ๊ณผ์ •๊ณผ ํ•™์Šต ๊ณผ์ •์— ๋Œ€ํ•œ ์ฝ”๋“œ๋“ค์„ ํ•จ๊ป˜ ์ œ๊ณตํ•จ์œผ๋กœ์จ, ์‚ฌ์šฉ์ž๊ฐ€ ๋‹ˆ์ฆˆ์— ๋งž๊ฒŒ ์†์‰ฌ์šด ์ˆ˜์ •ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋•๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. - **์ž…๋ฌธ์ž ์นœํ™”์ ์ธ** (Beginner-friendly) : ์ด๋ฒˆ ์ฑ•ํ„ฐ๋Š” diffusion ๋ชจ๋ธ๊ณผ `diffusers` ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์— ๋Œ€ํ•œ ์ „๋ฐ˜์ ์ธ ์ดํ•ด๋ฅผ ๋•๊ธฐ ์œ„ํ•ด ์ž‘์„ฑ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ diffusion ๋ชจ๋ธ์— ๋Œ€ํ•œ ์ตœ์‹  SOTA (state-of-the-art) ๋ฐฉ๋ฒ•๋ก ๋“ค ๊ฐ€์šด๋ฐ์„œ๋„, ์ž…๋ฌธ์ž์—๊ฒŒ๋Š” ๋งŽ์ด ์–ด๋ ค์šธ ์ˆ˜ ์žˆ๋‹ค๊ณ  ํŒ๋‹จ๋˜๋ฉด, ํ•ด๋‹น ๋ฐฉ๋ฒ•๋ก ๋“ค์€ ์—ฌ๊ธฐ์„œ ๋‹ค๋ฃจ์ง€ ์•Š์œผ๋ ค๊ณ  ํ•ฉ๋‹ˆ๋‹ค. - **ํ•˜๋‚˜์˜ ํƒœ์Šคํฌ๋งŒ ํฌํ•จํ•  ๊ฒƒ**(One-purpose-only): ์—ฌ๊ธฐ์„œ ๋‹ค๋ฃฐ ์˜ˆ์‹œ๋“ค์€ ํ•˜๋‚˜์˜ ํƒœ์Šคํฌ๋งŒ ํฌํ•จํ•˜๊ณ  ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋ฌผ๋ก  ์ด๋ฏธ์ง€ ์ดˆํ•ด์ƒํ™”(super-resolution)์™€ ์ด๋ฏธ์ง€ ๋ณด์ •(modification)๊ณผ ๊ฐ™์€ ์œ ์‚ฌํ•œ ๋ชจ๋ธ๋ง ํ”„๋กœ์„ธ์Šค๋ฅผ ๊ฐ–๋Š” ํƒœ์Šคํฌ๋“ค์ด ์กด์žฌํ•˜๊ฒ ์ง€๋งŒ, ํ•˜๋‚˜์˜ ์˜ˆ์ œ์— ํ•˜๋‚˜์˜ ํƒœ์Šคํฌ๋งŒ์„ ๋‹ด๋Š” ๊ฒƒ์ด ๋” ์ดํ•ดํ•˜๊ธฐ ์šฉ์ดํ•˜๋‹ค๊ณ  ํŒ๋‹จํ–ˆ๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. ์ €ํฌ๋Š” diffusion ๋ชจ๋ธ์˜ ๋Œ€ํ‘œ์ ์ธ ํƒœ์Šคํฌ๋“ค์„ ๋‹ค๋ฃจ๋Š” ๊ณต์‹ ์˜ˆ์ œ๋ฅผ ์ œ๊ณตํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. 
*๊ณต์‹* ์˜ˆ์ œ๋Š” ํ˜„์žฌ ์ง„ํ–‰ํ˜•์œผ๋กœ `diffusers` ๊ด€๋ฆฌ์ž๋“ค(maintainers)์— ์˜ํ•ด ๊ด€๋ฆฌ๋˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ์ €ํฌ๋Š” ์•ž์„œ ์ •์˜ํ•œ ์ €ํฌ์˜ ์ฒ ํ•™์„ ์—„๊ฒฉํ•˜๊ฒŒ ๋”ฐ๋ฅด๊ณ ์ž ๋…ธ๋ ฅํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ˜น์‹œ ์—ฌ๋Ÿฌ๋ถ„๊ป˜์„œ ์ด๋Ÿฌํ•œ ์˜ˆ์‹œ๊ฐ€ ๋ฐ˜๋“œ์‹œ ํ•„์š”ํ•˜๋‹ค๊ณ  ์ƒ๊ฐ๋˜์‹ ๋‹ค๋ฉด, ์–ธ์ œ๋“ ์ง€ [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) ํ˜น์€ ์ง์ ‘ [Pull Request](https://github.com/huggingface/diffusers/compare)๋ฅผ ์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์ €ํฌ๋Š” ์–ธ์ œ๋‚˜ ํ™˜์˜์ž…๋‹ˆ๋‹ค! ํ•™์Šต ์˜ˆ์‹œ๋“ค์€ ๋‹ค์–‘ํ•œ ํƒœ์Šคํฌ๋“ค์— ๋Œ€ํ•ด diffusion ๋ชจ๋ธ์„ ์‚ฌ์ „ํ•™์Šต(pretrain)ํ•˜๊ฑฐ๋‚˜ ํŒŒ์ธํŠœ๋‹(fine-tuning)ํ•˜๋Š” ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. ํ˜„์žฌ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์˜ˆ์ œ๋“ค์„ ์ง€์›ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. - [Unconditional Training](./unconditional_training) - [Text-to-Image Training](./text2image) - [Text Inversion](./text_inversion) - [Dreambooth](./dreambooth) memory-efficient attention ์—ฐ์‚ฐ์„ ์ˆ˜ํ–‰ํ•˜๊ธฐ ์œ„ํ•ด, ๊ฐ€๋Šฅํ•˜๋ฉด [xFormers](../optimization/xformers)๋ฅผ ์„ค์น˜ํ•ด์ฃผ์‹œ๊ธฐ ๋ฐ”๋ž๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ํ•™์Šต ์†๋„๋ฅผ ๋Š˜๋ฆฌ๊ณ  ๋ฉ”๋ชจ๋ฆฌ์— ๋Œ€ํ•œ ๋ถ€๋‹ด์„ ์ค„์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. | Task | ๐Ÿค— Accelerate | ๐Ÿค— Datasets | Colab |---|---|:---:|:---:| | [**Unconditional Image Generation**](./unconditional_training) | โœ… | โœ… | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | [**Text-to-Image fine-tuning**](./text2image) | โœ… | โœ… | | [**Textual Inversion**](./text_inversion) | โœ… | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | [**Dreambooth**](./dreambooth) | โœ… | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | [**Training with LoRA**](./lora) | โœ… | - | - | | [**ControlNet**](./controlnet) | โœ… | โœ… | - | | [**InstructPix2Pix**](./instructpix2pix) | โœ… | โœ… | - | | [**Custom Diffusion**](./custom_diffusion) | โœ… | โœ… | - | ## ์ปค๋ฎค๋‹ˆํ‹ฐ ๊ณต์‹ ์˜ˆ์ œ ์™ธ์—๋„ **์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ œ** ์—ญ์‹œ ์ œ๊ณตํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ•ด๋‹น ์˜ˆ์ œ๋“ค์€ ์šฐ๋ฆฌ์˜ ์ปค๋ฎค๋‹ˆํ‹ฐ์— ์˜ํ•ด ๊ด€๋ฆฌ๋ฉ๋‹ˆ๋‹ค. ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ฉจ๋Š” ํ•™์Šต ์˜ˆ์‹œ๋‚˜ ์ถ”๋ก  ํŒŒ์ดํ”„๋ผ์ธ์œผ๋กœ ๊ตฌ์„ฑ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์‹œ๋“ค์˜ ๊ฒฝ์šฐ, ์•ž์„œ ์ •์˜ํ–ˆ๋˜ ์ฒ ํ•™๋“ค์„ ์ข€ ๋” ๊ด€๋Œ€ํ•˜๊ฒŒ ์ ์šฉํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ ์ด๋Ÿฌํ•œ ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์‹œ๋“ค์˜ ๊ฒฝ์šฐ, ๋ชจ๋“  ์ด์Šˆ๋“ค์— ๋Œ€ํ•œ ์œ ์ง€๋ณด์ˆ˜๋ฅผ ๋ณด์žฅํ•  ์ˆ˜๋Š” ์—†์Šต๋‹ˆ๋‹ค. ์œ ์šฉํ•˜๊ธด ํ•˜์ง€๋งŒ, ์•„์ง์€ ๋Œ€์ค‘์ ์ด์ง€ ๋ชปํ•˜๊ฑฐ๋‚˜ ์ €ํฌ์˜ ์ฒ ํ•™์— ๋ถ€ํ•ฉํ•˜์ง€ ์•Š๋Š” ์˜ˆ์ œ๋“ค์€ [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) ํด๋”์— ๋‹ด๊ธฐ๊ฒŒ ๋ฉ๋‹ˆ๋‹ค. **Note**: ์ปค๋ฎค๋‹ˆํ‹ฐ ์˜ˆ์ œ๋Š” `diffusers`์— ๊ธฐ์—ฌ(contribution)๋ฅผ ํฌ๋งํ•˜๋Š” ๋ถ„๋“ค์—๊ฒŒ [์•„์ฃผ ์ข‹์€ ๊ธฐ์—ฌ ์ˆ˜๋‹จ](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)์ด ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
## ์ฃผ๋ชฉํ•  ์‚ฌํ•ญ๋“ค ์ตœ์‹  ๋ฒ„์ „์˜ ์˜ˆ์‹œ ์ฝ”๋“œ๋“ค์˜ ์„ฑ๊ณต์ ์ธ ๊ตฌ๋™์„ ๋ณด์žฅํ•˜๊ธฐ ์œ„ํ•ด์„œ๋Š”, ๋ฐ˜๋“œ์‹œ **์†Œ์Šค์ฝ”๋“œ๋ฅผ ํ†ตํ•ด `diffusers`๋ฅผ ์„ค์น˜ํ•ด์•ผ ํ•˜๋ฉฐ,** ํ•ด๋‹น ์˜ˆ์‹œ ์ฝ”๋“œ๋“ค์ด ์š”๊ตฌํ•˜๋Š” ๋””ํŽœ๋˜์‹œ๋“ค ์—ญ์‹œ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด ์ƒˆ๋กœ์šด ๊ฐ€์ƒ ํ™˜๊ฒฝ์„ ๊ตฌ์ถ•ํ•˜๊ณ  ๋‹ค์Œ์˜ ๋ช…๋ น์–ด๋ฅผ ์‹คํ–‰ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` ๊ทธ ๋‹ค์Œ `cd` ๋ช…๋ น์–ด๋ฅผ ํ†ตํ•ด ํ•ด๋‹น ์˜ˆ์ œ ๋””๋ ‰ํ† ๋ฆฌ์— ์ ‘๊ทผํ•ด์„œ ๋‹ค์Œ ๋ช…๋ น์–ด๋ฅผ ์‹คํ–‰ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค. ```bash pip install -r requirements.txt ```
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/unconditional_training.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ unconditional ์ด๋ฏธ์ง€ ์ƒ์„ฑ์€ text-to-image ๋˜๋Š” image-to-image ๋ชจ๋ธ๊ณผ ๋‹ฌ๋ฆฌ ํ…์ŠคํŠธ๋‚˜ ์ด๋ฏธ์ง€์— ๋Œ€ํ•œ ์กฐ๊ฑด์ด ์—†์ด ํ•™์Šต ๋ฐ์ดํ„ฐ ๋ถ„ํฌ์™€ ์œ ์‚ฌํ•œ ์ด๋ฏธ์ง€๋งŒ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. <iframe src="https://stevhliu-ddpm-butterflies-128.hf.space" frameborder="0" width="850" height="550" ></iframe> ์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” ๊ธฐ์กด์— ์กด์žฌํ•˜๋˜ ๋ฐ์ดํ„ฐ์…‹๊ณผ ์ž์‹ ๋งŒ์˜ ์ปค์Šคํ…€ ๋ฐ์ดํ„ฐ์…‹์— ๋Œ€ํ•ด unconditional image generation ๋ชจ๋ธ์„ ํ›ˆ๋ จํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์„ค๋ช…ํ•ฉ๋‹ˆ๋‹ค. ํ›ˆ๋ จ ์„ธ๋ถ€ ์‚ฌํ•ญ์— ๋Œ€ํ•ด ๋” ์ž์„ธํžˆ ์•Œ๊ณ  ์‹ถ๋‹ค๋ฉด unconditional image generation์„ ์œ„ํ•œ ๋ชจ๋“  ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ [์—ฌ๊ธฐ](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation)์—์„œ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „, ๋จผ์ € ์˜์กด์„ฑ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋“ค์„ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```bash pip install diffusers[training] accelerate datasets ``` ๊ทธ ๋‹ค์Œ ๐Ÿค— [Accelerate](https://github.com/huggingface/accelerate/) ํ™˜๊ฒฝ์„ ์ดˆ๊ธฐํ™”ํ•ฉ๋‹ˆ๋‹ค. ```bash accelerate config ``` ๋ณ„๋„์˜ ์„ค์ • ์—†์ด ๊ธฐ๋ณธ ์„ค์ •์œผ๋กœ ๐Ÿค— [Accelerate](https://github.com/huggingface/accelerate/) ํ™˜๊ฒฝ์„ ์ดˆ๊ธฐํ™”ํ•ด๋ด…์‹œ๋‹ค. ```bash accelerate config default ``` ๋…ธํŠธ๋ถ๊ณผ ๊ฐ™์€ ๋Œ€ํ™”ํ˜• ์‰˜์„ ์ง€์›ํ•˜์ง€ ์•Š๋Š” ํ™˜๊ฒฝ์˜ ๊ฒฝ์šฐ, ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์‚ฌ์šฉํ•ด๋ณผ ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ```py from accelerate.utils import write_basic_config write_basic_config() ``` ## ๋ชจ๋ธ์„ ํ—ˆ๋ธŒ์— ์—…๋กœ๋“œํ•˜๊ธฐ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ž๋ฅผ ์ถ”๊ฐ€ํ•˜์—ฌ ํ—ˆ๋ธŒ์— ๋ชจ๋ธ์„ ์—…๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```bash --push_to_hub ``` ## ์ฒดํฌํฌ์ธํŠธ ์ €์žฅํ•˜๊ณ  ๋ถˆ๋Ÿฌ์˜ค๊ธฐ ํ›ˆ๋ จ ์ค‘ ๋ฌธ์ œ๊ฐ€ ๋ฐœ์ƒํ•  ๊ฒฝ์šฐ๋ฅผ ๋Œ€๋น„ํ•˜์—ฌ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ •๊ธฐ์ ์œผ๋กœ ์ €์žฅํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ €์žฅํ•˜๋ ค๋ฉด ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ž๋ฅผ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค: ```bash --checkpointing_steps=500 ``` ์ „์ฒด ํ›ˆ๋ จ ์ƒํƒœ๋Š” 500์Šคํ…๋งˆ๋‹ค `output_dir`์˜ ํ•˜์œ„ ํด๋”์— ์ €์žฅ๋˜๋ฉฐ, ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— `--resume_from_checkpoint` ์ธ์ž๋ฅผ ์ „๋‹ฌํ•จ์œผ๋กœ์จ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ๋ถˆ๋Ÿฌ์˜ค๊ณ  ํ›ˆ๋ จ์„ ์žฌ๊ฐœํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```bash --resume_from_checkpoint="checkpoint-1500" ``` ## ํŒŒ์ธํŠœ๋‹ ์ด์ œ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹œ์ž‘ํ•  ์ค€๋น„๊ฐ€ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค! `--dataset_name` ์ธ์ž์— ํŒŒ์ธํŠœ๋‹ํ•  ๋ฐ์ดํ„ฐ์…‹ ์ด๋ฆ„์„ ์ง€์ •ํ•œ ๋‹ค์Œ, `--output_dir` ์ธ์ž์— ์ง€์ •๋œ ๊ฒฝ๋กœ๋กœ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ๋ณธ์ธ๋งŒ์˜ ๋ฐ์ดํ„ฐ์…‹๋ฅผ ์‚ฌ์šฉํ•˜๋ ค๋ฉด, [ํ•™์Šต์šฉ ๋ฐ์ดํ„ฐ์…‹ ๋งŒ๋“ค๊ธฐ](create_dataset) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” `diffusion_pytorch_model.bin` ํŒŒ์ผ์„ ์ƒ์„ฑํ•˜๊ณ , ๊ทธ๊ฒƒ์„ ๋‹น์‹ ์˜ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ์— ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. <Tip> ๐Ÿ’ก ์ „์ฒด ํ•™์Šต์€ V100 GPU 4๊ฐœ๋ฅผ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ, 2์‹œ๊ฐ„์ด ์†Œ์š”๋ฉ๋‹ˆ๋‹ค. 
</Tip> ์˜ˆ๋ฅผ ๋“ค์–ด, [Oxford Flowers](https://huggingface.co/datasets/huggan/flowers-102-categories) ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ•ด ํŒŒ์ธํŠœ๋‹ํ•  ๊ฒฝ์šฐ: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --resolution=64 \ --output_dir="ddpm-ema-flowers-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` <div class="flex justify-center"> <img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png"/> </div> [Pokemon](https://huggingface.co/datasets/huggan/pokemon) ๋ฐ์ดํ„ฐ์…‹์„ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` <div class="flex justify-center"> <img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png"/> </div> ### ์—ฌ๋Ÿฌ๊ฐœ์˜ GPU๋กœ ํ›ˆ๋ จํ•˜๊ธฐ `accelerate`์„ ์‚ฌ์šฉํ•˜๋ฉด ์›ํ™œํ•œ ๋‹ค์ค‘ GPU ํ›ˆ๋ จ์ด ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค. `accelerate`์„ ์‚ฌ์šฉํ•˜์—ฌ ๋ถ„์‚ฐ ํ›ˆ๋ จ์„ ์‹คํ–‰ํ•˜๋ ค๋ฉด [์—ฌ๊ธฐ](https://huggingface.co/docs/accelerate/basic_tutorials/launch) ์ง€์นจ์„ ๋”ฐ๋ฅด์„ธ์š”. ๋‹ค์Œ์€ ๋ช…๋ น์–ด ์˜ˆ์ œ์ž…๋‹ˆ๋‹ค. ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision="fp16" \ --logger="wandb" \ --push_to_hub ```
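ํ•™์Šต์ด ๋๋‚˜๋ฉด `output_dir`์— ์ €์žฅ๋œ ํŒŒ์ดํ”„๋ผ์ธ์œผ๋กœ ๋ฐ”๋กœ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•ด๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜๋Š” ์œ„ ์˜ˆ์ œ์˜ `ddpm-ema-pokemon-64` ์ถœ๋ ฅ ๋””๋ ‰ํ† ๋ฆฌ๋ฅผ ๊ฐ€์ •ํ•œ ์ตœ์†Œํ•œ์˜ ์Šค์ผ€์น˜์ž…๋‹ˆ๋‹ค.

```python
from diffusers import DDPMPipeline

# ๊ฐ€์ •: ์œ„ ๋ช…๋ น์–ด์˜ --output_dir ๊ฒฝ๋กœ(๋˜๋Š” --push_to_hub๋กœ ์˜ฌ๋ฆฐ ๋ฆฌํฌ์ง€ํ† ๋ฆฌ ID)
pipeline = DDPMPipeline.from_pretrained("ddpm-ema-pokemon-64")

# unconditional ์ƒ์„ฑ์ด๋ฏ€๋กœ ํ”„๋กฌํ”„ํŠธ ์—†์ด ํ˜ธ์ถœํ•ฉ๋‹ˆ๋‹ค.
image = pipeline().images[0]
image.save("pokemon-sample.png")
```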
0
hf_public_repos/diffusers/docs/source/ko
hf_public_repos/diffusers/docs/source/ko/training/dreambooth.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# DreamBooth

[DreamBooth](https://arxiv.org/abs/2208.12242)๋Š” ํ•œ ์ฃผ์ œ์— ๋Œ€ํ•œ ์ ์€ ์ด๋ฏธ์ง€(3~5๊ฐœ)๋งŒ์œผ๋กœ๋„ stable diffusion๊ณผ ๊ฐ™์€ text-to-image ๋ชจ๋ธ์„ ๊ฐœ์ธํ™”ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ๋ชจ๋ธ์€ ๋‹ค์–‘ํ•œ ์žฅ๋ฉด, ํฌ์ฆˆ, ์‹œ์ (๋ทฐ)์—์„œ ํ”ผ์‚ฌ์ฒด์— ๋Œ€ํ•ด ๋งฅ๋ฝํ™”(contextualized)๋œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

![ํ”„๋กœ์ ํŠธ ๋ธ”๋กœ๊ทธ์—์„œ์˜ DreamBooth ์˜ˆ์‹œ](https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg)
<small><a href="https://dreambooth.github.io">ํ”„๋กœ์ ํŠธ ๋ธ”๋กœ๊ทธ</a>์—์„œ์˜ DreamBooth ์˜ˆ์‹œ</small>

์ด ๊ฐ€์ด๋“œ๋Š” ๋‹ค์–‘ํ•œ GPU ์‚ฌ์–‘๊ณผ Flax ํ™˜๊ฒฝ์—์„œ [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) ๋ชจ๋ธ๋กœ DreamBooth๋ฅผ ํŒŒ์ธํŠœ๋‹ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. ๋” ๊นŠ์ด ํŒŒ๊ณ ๋“ค์–ด ์ž‘๋™ ๋ฐฉ์‹์„ ํ™•์ธํ•˜๋Š” ๋ฐ ๊ด€์‹ฌ์ด ์žˆ๋Š” ๊ฒฝ์šฐ, ์ด ๊ฐ€์ด๋“œ์— ์‚ฌ์šฉ๋œ DreamBooth์˜ ๋ชจ๋“  ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ [์—ฌ๊ธฐ](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „์— ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์˜ ํ•™์Šต์— ํ•„์š”ํ•œ dependencies๋ฅผ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ๋˜ํ•œ `main` GitHub ๋ธŒ๋žœ์น˜์—์„œ 🧨 Diffusers๋ฅผ ์„ค์น˜ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค.

```bash
pip install git+https://github.com/huggingface/diffusers
pip install -U -r diffusers/examples/dreambooth/requirements.txt
```

xFormers๋Š” ํ•™์Šต์— ํ•„์š”ํ•œ ์š”๊ตฌ ์‚ฌํ•ญ์€ ์•„๋‹ˆ์ง€๋งŒ, ๊ฐ€๋Šฅํ•˜๋ฉด [์„ค์น˜](../optimization/xformers)ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ํ•™์Šต ์†๋„๋ฅผ ๋†’์ด๊ณ  ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰์„ ์ค„์ผ ์ˆ˜ ์žˆ๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค.

๋ชจ๋“  dependencies๋ฅผ ์„ค์ •ํ•œ ํ›„, [🤗 Accelerate](https://github.com/huggingface/accelerate/) ํ™˜๊ฒฝ์„ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ดˆ๊ธฐํ™”ํ•ฉ๋‹ˆ๋‹ค:

```bash
accelerate config
```

๋ณ„๋„ ์„ค์ • ์—†์ด ๊ธฐ๋ณธ 🤗 Accelerate ํ™˜๊ฒฝ์„ ์„ค์น˜ํ•˜๋ ค๋ฉด ๋‹ค์Œ์„ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค:

```bash
accelerate config default
```

๋˜๋Š” ํ˜„์žฌ ํ™˜๊ฒฝ์ด ๋…ธํŠธ๋ถ๊ณผ ๊ฐ™์€ ๋Œ€ํ™”ํ˜• ์…ธ์„ ์ง€์›ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ ๋‹ค์Œ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

## ํŒŒ์ธํŠœ๋‹

<Tip warning={true}>

DreamBooth ํŒŒ์ธํŠœ๋‹์€ ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ์— ๋งค์šฐ ๋ฏผ๊ฐํ•˜๊ณ  ๊ณผ์ ํ•ฉ๋˜๊ธฐ ์‰ฝ์Šต๋‹ˆ๋‹ค. ์ ์ ˆํ•œ ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์„ ํƒํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋„๋ก ๋‹ค์–‘ํ•œ ๊ถŒ์žฅ ์„ค์ •์ด ํฌํ•จ๋œ [์‹ฌ์ธต ๋ถ„์„](https://huggingface.co/blog/dreambooth)์„ ์‚ดํŽด๋ณด๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค.

</Tip>

<frameworkcontent>
<pt>
[๋ช‡ ์žฅ์˜ ๊ฐ•์•„์ง€ ์ด๋ฏธ์ง€๋“ค](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ)๋กœ DreamBooth๋ฅผ ์‹œ๋„ํ•ด๋ด…์‹œ๋‹ค.
์ด๋ฅผ ๋‹ค์šด๋กœ๋“œํ•ด ๋””๋ ‰ํ„ฐ๋ฆฌ์— ์ €์žฅํ•œ ๋‹ค์Œ `INSTANCE_DIR` ํ™˜๊ฒฝ ๋ณ€์ˆ˜๋ฅผ ํ•ด๋‹น ๊ฒฝ๋กœ๋กœ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export OUTPUT_DIR="path_to_saved_model" ``` ๊ทธ๋Ÿฐ ๋‹ค์Œ, ๋‹ค์Œ ๋ช…๋ น์„ ์‚ฌ์šฉํ•˜์—ฌ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค (์ „์ฒด ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” [์—ฌ๊ธฐ](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค): ```bash accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=400 ``` </pt> <jax> TPU์— ์•ก์„ธ์Šคํ•  ์ˆ˜ ์žˆ๊ฑฐ๋‚˜ ๋” ๋น ๋ฅด๊ฒŒ ํ›ˆ๋ จํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด [Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py)๋ฅผ ์‚ฌ์šฉํ•ด ๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. Flax ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” gradient checkpointing ๋˜๋Š” gradient accumulation์„ ์ง€์›ํ•˜์ง€ ์•Š์œผ๋ฏ€๋กœ, ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ 30GB ์ด์ƒ์ธ GPU๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰ํ•˜๊ธฐ ์ „์— ์š”๊ตฌ ์‚ฌํ•ญ์ด ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์‹ญ์‹œ์˜ค. ```bash pip install -U -r requirements.txt ``` ๊ทธ๋Ÿฌ๋ฉด ๋‹ค์Œ ๋ช…๋ น์–ด๋กœ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‹คํ–‰์‹œํ‚ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="path-to-instance-images" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=5e-6 \ --max_train_steps=400 ``` </jax> </frameworkcontent> ### Prior-preserving(์‚ฌ์ „ ๋ณด์กด) loss๋ฅผ ์‚ฌ์šฉํ•œ ํŒŒ์ธํŠœ๋‹ ๊ณผ์ ํ•ฉ๊ณผ language drift๋ฅผ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•ด ์‚ฌ์ „ ๋ณด์กด์ด ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค(๊ด€์‹ฌ์ด ์žˆ๋Š” ๊ฒฝ์šฐ [๋…ผ๋ฌธ](https://arxiv.org/abs/2208.12242)์„ ์ฐธ์กฐํ•˜์„ธ์š”). ์‚ฌ์ „ ๋ณด์กด์„ ์œ„ํ•ด ๋™์ผํ•œ ํด๋ž˜์Šค์˜ ๋‹ค๋ฅธ ์ด๋ฏธ์ง€๋ฅผ ํ•™์Šต ํ”„๋กœ์„ธ์Šค์˜ ์ผ๋ถ€๋กœ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์ข‹์€ ์ ์€ Stable Diffusion ๋ชจ๋ธ ์ž์ฒด๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋Ÿฌํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ๋‹ค๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค! ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ๋Š” ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋ฅผ ์šฐ๋ฆฌ๊ฐ€ ์ง€์ •ํ•œ ๋กœ์ปฌ ๊ฒฝ๋กœ์— ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ์ €์ž๋“ค์— ๋”ฐ๋ฅด๋ฉด ์‚ฌ์ „ ๋ณด์กด์„ ์œ„ํ•ด `num_epochs * num_samples`๊ฐœ์˜ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. 200-300๊ฐœ์—์„œ ๋Œ€๋ถ€๋ถ„ ์ž˜ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. 
<frameworkcontent>
<pt>
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export CLASS_DIR="path_to_class_images"
export OUTPUT_DIR="path_to_saved_model"

accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=1 \
  --learning_rate=5e-6 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --max_train_steps=800
```
</pt>
<jax>
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"

python train_dreambooth_flax.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --learning_rate=5e-6 \
  --num_class_images=200 \
  --max_train_steps=800
```
</jax>
</frameworkcontent>

## ํ…์ŠคํŠธ ์ธ์ฝ”๋”์™€ UNet์œผ๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๊ธฐ

ํ•ด๋‹น ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด `unet`๊ณผ ํ•จ๊ป˜ `text_encoder`๋ฅผ ํŒŒ์ธํŠœ๋‹ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์‹คํ—˜์—์„œ(์ž์„ธํ•œ ๋‚ด์šฉ์€ [🧨 Diffusers๋ฅผ ์‚ฌ์šฉํ•ด DreamBooth๋กœ Stable Diffusion ํ•™์Šตํ•˜๊ธฐ](https://huggingface.co/blog/dreambooth) ๊ฒŒ์‹œ๋ฌผ์„ ํ™•์ธํ•˜์„ธ์š”), ํŠนํžˆ ์–ผ๊ตด ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•  ๋•Œ ํ›จ์”ฌ ๋” ๋‚˜์€ ๊ฒฐ๊ณผ๋ฅผ ์–ป์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.

<Tip warning={true}>

ํ…์ŠคํŠธ ์ธ์ฝ”๋”๋ฅผ ํ•™์Šต์‹œํ‚ค๋ ค๋ฉด ์ถ”๊ฐ€ ๋ฉ”๋ชจ๋ฆฌ๊ฐ€ ํ•„์š”ํ•ด 16GB GPU๋กœ๋Š” ๋™์ž‘ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์ด ์˜ต์…˜์„ ์‚ฌ์šฉํ•˜๋ ค๋ฉด ์ตœ์†Œ 24GB VRAM์ด ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค.
</Tip> `--train_text_encoder` ์ธ์ˆ˜๋ฅผ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ์ „๋‹ฌํ•˜์—ฌ `text_encoder` ๋ฐ `unet`์„ ํŒŒ์ธํŠœ๋‹ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: <frameworkcontent> <pt> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_text_encoder \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --use_8bit_adam --gradient_checkpointing \ --learning_rate=2e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` </pt> <jax> ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --train_text_encoder \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=2e-6 \ --num_class_images=200 \ --max_train_steps=800 ``` </jax> </frameworkcontent> ## LoRA๋กœ ํŒŒ์ธํŠœ๋‹ํ•˜๊ธฐ DreamBooth์—์„œ ๋Œ€๊ทœ๋ชจ ๋ชจ๋ธ์˜ ํ•™์Šต์„ ๊ฐ€์†ํ™”ํ•˜๊ธฐ ์œ„ํ•œ ํŒŒ์ธํŠœ๋‹ ๊ธฐ์ˆ ์ธ LoRA(Low-Rank Adaptation of Large Language Models)๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ž์„ธํ•œ ๋‚ด์šฉ์€ [LoRA ํ•™์Šต](training/lora#dreambooth) ๊ฐ€์ด๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ### ํ•™์Šต ์ค‘ ์ฒดํฌํฌ์ธํŠธ ์ €์žฅํ•˜๊ธฐ Dreambooth๋กœ ํ›ˆ๋ จํ•˜๋Š” ๋™์•ˆ ๊ณผ์ ํ•ฉํ•˜๊ธฐ ์‰ฌ์šฐ๋ฏ€๋กœ, ๋•Œ๋•Œ๋กœ ํ•™์Šต ์ค‘์— ์ •๊ธฐ์ ์ธ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์ €์žฅํ•˜๋Š” ๊ฒƒ์ด ์œ ์šฉํ•ฉ๋‹ˆ๋‹ค. ์ค‘๊ฐ„ ์ฒดํฌํฌ์ธํŠธ ์ค‘ ํ•˜๋‚˜๊ฐ€ ์ตœ์ข… ๋ชจ๋ธ๋ณด๋‹ค ๋” ์ž˜ ์ž‘๋™ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ์ฒดํฌํฌ์ธํŠธ ์ €์žฅ ๊ธฐ๋Šฅ์„ ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ์ „๋‹ฌํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค: ```bash --checkpointing_steps=500 ``` ์ด๋ ‡๊ฒŒ ํ•˜๋ฉด `output_dir`์˜ ํ•˜์œ„ ํด๋”์— ์ „์ฒด ํ•™์Šต ์ƒํƒœ๊ฐ€ ์ €์žฅ๋ฉ๋‹ˆ๋‹ค. ํ•˜์œ„ ํด๋” ์ด๋ฆ„์€ ์ ‘๋‘์‚ฌ `checkpoint-`๋กœ ์‹œ์ž‘ํ•˜๊ณ  ์ง€๊ธˆ๊นŒ์ง€ ์ˆ˜ํ–‰๋œ step ์ˆ˜์ž…๋‹ˆ๋‹ค. ์˜ˆ์‹œ๋กœ `checkpoint-1500`์€ 1500 ํ•™์Šต step ํ›„์— ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์ž…๋‹ˆ๋‹ค. #### ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์—์„œ ํ›ˆ๋ จ ์žฌ๊ฐœํ•˜๊ธฐ ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์—์„œ ํ›ˆ๋ จ์„ ์žฌ๊ฐœํ•˜๋ ค๋ฉด, `--resume_from_checkpoint` ์ธ์ˆ˜๋ฅผ ์ „๋‹ฌํ•œ ๋‹ค์Œ ์‚ฌ์šฉํ•  ์ฒดํฌํฌ์ธํŠธ์˜ ์ด๋ฆ„์„ ์ง€์ •ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค. ํŠน์ˆ˜ ๋ฌธ์ž์—ด `"latest"`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ €์žฅ๋œ ๋งˆ์ง€๋ง‰ ์ฒดํฌํฌ์ธํŠธ(์ฆ‰, step ์ˆ˜๊ฐ€ ๊ฐ€์žฅ ๋งŽ์€ ์ฒดํฌํฌ์ธํŠธ)์—์„œ ์žฌ๊ฐœํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด ๋‹ค์Œ์€ 1500 step ํ›„์— ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ์—์„œ๋ถ€ํ„ฐ ํ•™์Šต์„ ์žฌ๊ฐœํ•ฉ๋‹ˆ๋‹ค: ```bash --resume_from_checkpoint="checkpoint-1500" ``` ์›ํ•˜๋Š” ๊ฒฝ์šฐ ์ผ๋ถ€ ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์กฐ์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. #### ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ถ”๋ก  ์ˆ˜ํ–‰ํ•˜๊ธฐ ์ €์žฅ๋œ ์ฒดํฌํฌ์ธํŠธ๋Š” ํ›ˆ๋ จ ์žฌ๊ฐœ์— ์ ํ•ฉํ•œ ํ˜•์‹์œผ๋กœ ์ €์žฅ๋ฉ๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์—๋Š” ๋ชจ๋ธ ๊ฐ€์ค‘์น˜๋ฟ๋งŒ ์•„๋‹ˆ๋ผ ์˜ตํ‹ฐ๋งˆ์ด์ €, ๋ฐ์ดํ„ฐ ๋กœ๋” ๋ฐ ํ•™์Šต๋ฅ ์˜ ์ƒํƒœ๋„ ํฌํ•จ๋ฉ๋‹ˆ๋‹ค. 
**`"accelerate>=0.16.0"`**์ด ์„ค์น˜๋œ ๊ฒฝ์šฐ ๋‹ค์Œ ์ฝ”๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ค‘๊ฐ„ ์ฒดํฌํฌ์ธํŠธ์—์„œ ์ถ”๋ก ์„ ์‹คํ–‰ํ•ฉ๋‹ˆ๋‹ค. ```python from diffusers import DiffusionPipeline, UNet2DConditionModel from transformers import CLIPTextModel import torch # ํ•™์Šต์— ์‚ฌ์šฉ๋œ ๊ฒƒ๊ณผ ๋™์ผํ•œ ์ธ์ˆ˜(model, revision)๋กœ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค. model_id = "CompVis/stable-diffusion-v1-4" unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet") # `args.train_text_encoder`๋กœ ํ•™์Šตํ•œ ๊ฒฝ์šฐ๋ฉด ํ…์ŠคํŠธ ์ธ์ฝ”๋”๋ฅผ ๊ผญ ๋ถˆ๋Ÿฌ์˜ค์„ธ์š” text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder") pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, dtype=torch.float16) pipeline.to("cuda") # ์ถ”๋ก ์„ ์ˆ˜ํ–‰ํ•˜๊ฑฐ๋‚˜ ์ €์žฅํ•˜๊ฑฐ๋‚˜, ํ—ˆ๋ธŒ์— ํ‘ธ์‹œํ•ฉ๋‹ˆ๋‹ค. pipeline.save_pretrained("dreambooth-pipeline") ``` If you have **`"accelerate<0.16.0"`** installed, you need to convert it to an inference pipeline first: ```python from accelerate import Accelerator from diffusers import DiffusionPipeline # ํ•™์Šต์— ์‚ฌ์šฉ๋œ ๊ฒƒ๊ณผ ๋™์ผํ•œ ์ธ์ˆ˜(model, revision)๋กœ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค. model_id = "CompVis/stable-diffusion-v1-4" pipeline = DiffusionPipeline.from_pretrained(model_id) accelerator = Accelerator() # ์ดˆ๊ธฐ ํ•™์Šต์— `--train_text_encoder`๊ฐ€ ์‚ฌ์šฉ๋œ ๊ฒฝ์šฐ text_encoder๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder) # ์ฒดํฌํฌ์ธํŠธ ๊ฒฝ๋กœ๋กœ๋ถ€ํ„ฐ ์ƒํƒœ๋ฅผ ๋ณต์›ํ•ฉ๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์„œ๋Š” ์ ˆ๋Œ€ ๊ฒฝ๋กœ๋ฅผ ์‚ฌ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100") # unwrapped ๋ชจ๋ธ๋กœ ํŒŒ์ดํ”„๋ผ์ธ์„ ๋‹ค์‹œ ๋นŒ๋“œํ•ฉ๋‹ˆ๋‹ค.(.unet and .text_encoder๋กœ์˜ ํ• ๋‹น๋„ ์ž‘๋™ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค) pipeline = DiffusionPipeline.from_pretrained( model_id, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), ) # ์ถ”๋ก ์„ ์ˆ˜ํ–‰ํ•˜๊ฑฐ๋‚˜ ์ €์žฅํ•˜๊ฑฐ๋‚˜, ํ—ˆ๋ธŒ์— ํ‘ธ์‹œํ•ฉ๋‹ˆ๋‹ค. pipeline.save_pretrained("dreambooth-pipeline") ``` ## ๊ฐ GPU ์šฉ๋Ÿ‰์—์„œ์˜ ์ตœ์ ํ™” ํ•˜๋“œ์›จ์–ด์— ๋”ฐ๋ผ 16GB์—์„œ 8GB๊นŒ์ง€ GPU์—์„œ DreamBooth๋ฅผ ์ตœ์ ํ™”ํ•˜๋Š” ๋ช‡ ๊ฐ€์ง€ ๋ฐฉ๋ฒ•์ด ์žˆ์Šต๋‹ˆ๋‹ค! ### xFormers [xFormers](https://github.com/facebookresearch/xformers)๋Š” Transformers๋ฅผ ์ตœ์ ํ™”ํ•˜๊ธฐ ์œ„ํ•œ toolbox์ด๋ฉฐ, ๐Ÿงจ Diffusers์—์„œ ์‚ฌ์šฉ๋˜๋Š”[memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) ๋ฉ”์ปค๋‹ˆ์ฆ˜์„ ํฌํ•จํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. [xFormers๋ฅผ ์„ค์น˜](./optimization/xformers)ํ•œ ๋‹ค์Œ ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค: ```bash --enable_xformers_memory_efficient_attention ``` xFormers๋Š” Flax์—์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค. ### ๊ทธ๋ž˜๋””์–ธํŠธ ์—†์Œ์œผ๋กœ ์„ค์ • ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰์„ ์ค„์ผ ์ˆ˜ ์žˆ๋Š” ๋˜ ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•์€ [๊ธฐ์šธ๊ธฐ ์„ค์ •](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html)์„ 0 ๋Œ€์‹  `None`์œผ๋กœ ํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ ์ด๋กœ ์ธํ•ด ํŠน์ • ๋™์ž‘์ด ๋ณ€๊ฒฝ๋  ์ˆ˜ ์žˆ์œผ๋ฏ€๋กœ ๋ฌธ์ œ๊ฐ€ ๋ฐœ์ƒํ•˜๋ฉด ์ด ์ธ์ˆ˜๋ฅผ ์ œ๊ฑฐํ•ด ๋ณด์‹ญ์‹œ์˜ค. ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— ๋‹ค์Œ ์ธ์ˆ˜๋ฅผ ์ถ”๊ฐ€ํ•˜์—ฌ ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ `None`์œผ๋กœ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. ```bash --set_grads_to_none ``` ### 16GB GPU Gradient checkpointing๊ณผ [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)์˜ 8๋น„ํŠธ ์˜ตํ‹ฐ๋งˆ์ด์ €์˜ ๋„์›€์œผ๋กœ, 16GB GPU์—์„œ dreambooth๋ฅผ ํ›ˆ๋ จํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
bitsandbytes๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”: ```bash pip install bitsandbytes ``` ๊ทธ ๋‹ค์Œ, ํ•™์Šต ์Šคํฌ๋ฆฝํŠธ์— `--use_8bit_adam` ์˜ต์…˜์„ ๋ช…์‹œํ•ฉ๋‹ˆ๋‹ค: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=2 --gradient_checkpointing \ --use_8bit_adam \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### 12GB GPU 12GB GPU์—์„œ DreamBooth๋ฅผ ์‹คํ–‰ํ•˜๋ ค๋ฉด gradient checkpointing, 8๋น„ํŠธ ์˜ตํ‹ฐ๋งˆ์ด์ €, xFormers๋ฅผ ํ™œ์„ฑํ™”ํ•˜๊ณ  ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ `None`์œผ๋กœ ์„ค์ •ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path-to-instance-images" export CLASS_DIR="path-to-class-images" export OUTPUT_DIR="path-to-save-model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --learning_rate=2e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 ``` ### 8GB GPU์—์„œ ํ•™์Šตํ•˜๊ธฐ 8GB GPU์— ๋Œ€ํ•ด์„œ๋Š” [DeepSpeed](https://www.deepspeed.ai/)๋ฅผ ์‚ฌ์šฉํ•ด ์ผ๋ถ€ ํ…์„œ๋ฅผ VRAM์—์„œ CPU ๋˜๋Š” NVME๋กœ ์˜คํ”„๋กœ๋“œํ•˜์—ฌ ๋” ์ ์€ GPU ๋ฉ”๋ชจ๋ฆฌ๋กœ ํ•™์Šตํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ๐Ÿค— Accelerate ํ™˜๊ฒฝ์„ ๊ตฌ์„ฑํ•˜๋ ค๋ฉด ๋‹ค์Œ ๋ช…๋ น์„ ์‹คํ–‰ํ•˜์„ธ์š”: ```bash accelerate config ``` ํ™˜๊ฒฝ ๊ตฌ์„ฑ ์ค‘์— DeepSpeed๋ฅผ ์‚ฌ์šฉํ•  ๊ฒƒ์„ ํ™•์ธํ•˜์„ธ์š”. ๊ทธ๋Ÿฌ๋ฉด DeepSpeed stage 2, fp16 ํ˜ผํ•ฉ ์ •๋ฐ€๋„๋ฅผ ๊ฒฐํ•ฉํ•˜๊ณ  ๋ชจ๋ธ ๋งค๊ฐœ๋ณ€์ˆ˜์™€ ์˜ตํ‹ฐ๋งˆ์ด์ € ์ƒํƒœ๋ฅผ ๋ชจ๋‘ CPU๋กœ ์˜คํ”„๋กœ๋“œํ•˜๋ฉด 8GB VRAM ๋ฏธ๋งŒ์—์„œ ํ•™์Šตํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋‹จ์ ์€ ๋” ๋งŽ์€ ์‹œ์Šคํ…œ RAM(์•ฝ 25GB)์ด ํ•„์š”ํ•˜๋‹ค๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์ถ”๊ฐ€ ๊ตฌ์„ฑ ์˜ต์…˜์€ [DeepSpeed ๋ฌธ์„œ](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ๋˜ํ•œ ๊ธฐ๋ณธ Adam ์˜ตํ‹ฐ๋งˆ์ด์ €๋ฅผ DeepSpeed์˜ ์ตœ์ ํ™”๋œ Adam ๋ฒ„์ „์œผ๋กœ ๋ณ€๊ฒฝํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ์ƒ๋‹นํ•œ ์†๋„ ํ–ฅ์ƒ์„ ์œ„ํ•œ Adam์ธ [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu)์ž…๋‹ˆ๋‹ค. `DeepSpeedCPUAdam`์„ ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด ์‹œ์Šคํ…œ์˜ CUDA toolchain ๋ฒ„์ „์ด PyTorch์™€ ํ•จ๊ป˜ ์„ค์น˜๋œ ๊ฒƒ๊ณผ ๋™์ผํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 8๋น„ํŠธ ์˜ตํ‹ฐ๋งˆ์ด์ €๋Š” ํ˜„์žฌ DeepSpeed์™€ ํ˜ธํ™˜๋˜์ง€ ์•Š๋Š” ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค. 
๋‹ค์Œ ๋ช…๋ น์œผ๋กœ ํ•™์Šต์„ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export INSTANCE_DIR="path_to_training_images" export CLASS_DIR="path_to_class_images" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir=$CLASS_DIR \ --output_dir=$OUTPUT_DIR \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="a photo of sks dog" \ --class_prompt="a photo of dog" \ --resolution=512 \ --train_batch_size=1 \ --sample_batch_size=1 \ --gradient_accumulation_steps=1 --gradient_checkpointing \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --max_train_steps=800 \ --mixed_precision=fp16 ``` ## ์ถ”๋ก  ๋ชจ๋ธ์„ ํ•™์Šตํ•œ ํ›„์—๋Š”, ๋ชจ๋ธ์ด ์ €์žฅ๋œ ๊ฒฝ๋กœ๋ฅผ ์ง€์ •ํ•ด [`StableDiffusionPipeline`]๋กœ ์ถ”๋ก ์„ ์ˆ˜ํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ํ”„๋กฌํ”„ํŠธ์— ํ•™์Šต์— ์‚ฌ์šฉ๋œ ํŠน์ˆ˜ `์‹๋ณ„์ž`(์ด์ „ ์˜ˆ์‹œ์˜ `sks`)๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”. **`"accelerate>=0.16.0"`**์ด ์„ค์น˜๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ ๋‹ค์Œ ์ฝ”๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ค‘๊ฐ„ ์ฒดํฌํฌ์ธํŠธ์—์„œ ์ถ”๋ก ์„ ์‹คํ–‰ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python from diffusers import StableDiffusionPipeline import torch model_id = "path_to_saved_model" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") prompt = "A photo of sks dog in a bucket" image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png") ``` [์ €์žฅ๋œ ํ•™์Šต ์ฒดํฌํฌ์ธํŠธ](#inference-from-a-saved-checkpoint)์—์„œ๋„ ์ถ”๋ก ์„ ์‹คํ–‰ํ•  ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค.
0