# File: hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
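# enable_full_determinism() switches PyTorch to deterministic algorithm
# implementations so the hard-coded expected slices below are reproducible.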
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will): xFormers fails with "Expected attn_bias.stride(1) == 0 to be true, but got false"
test_xformers_attention = False
def get_dummy_components(self):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
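        # A single size is shared here so the dummy prior, text encoders, and UNet
        # stay dimensionally compatible (see projection_class_embeddings_input_dim below).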
# prior components
torch.manual_seed(0)
prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=embedder_hidden_size,
projection_dim=embedder_projection_dim,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
)
torch.manual_seed(0)
prior = PriorTransformer(
num_attention_heads=2,
attention_head_dim=12,
embedding_dim=embedder_projection_dim,
num_layers=1,
)
torch.manual_seed(0)
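        # "sample" prediction with log-variance clipping presumably mirrors the
        # unCLIP/Karlo prior, which diffuses directly in CLIP image-embedding space.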
prior_scheduler = DDPMScheduler(
variance_type="fixed_small_log",
prediction_type="sample",
num_train_timesteps=1000,
clip_sample=True,
clip_sample_range=5.0,
beta_schedule="squaredcos_cap_v2",
)
# regular denoising components
torch.manual_seed(0)
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=embedder_hidden_size,
projection_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
)
torch.manual_seed(0)
unet = UNet2DConditionModel(
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
block_out_channels=(32, 64),
attention_head_dim=(2, 4),
class_embed_type="projection",
            # The class embeddings are the noise-augmented image embeddings,
            # i.e. the image embeddings concatenated with the noised embeddings of the same dimension.
projection_class_embeddings_input_dim=embedder_projection_dim * 2,
cross_attention_dim=embedder_hidden_size,
layers_per_block=1,
upcast_attention=True,
use_linear_projection=True,
)
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_schedule="scaled_linear",
beta_start=0.00085,
beta_end=0.012,
prediction_type="v_prediction",
set_alpha_to_one=False,
steps_offset=1,
)
torch.manual_seed(0)
vae = AutoencoderKL()
components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def get_dummy_inputs(self, device, seed=0):
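        # torch.Generator does not support the "mps" device, so fall back to the
        # global (CPU) RNG state there.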
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
# Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because UnCLIP GPU non-determinism requires a looser check.
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
# Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because UnCLIP non-determinism requires a looser check.
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-3)
@nightly
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
)
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
        # Stable unCLIP will OOM when the integration tests are run on a V100,
        # so turn on memory savings.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe("anime turle", generator=generator, output_type="np")
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
"anime turtle",
prior_num_inference_steps=2,
num_inference_steps=2,
output_type="np",
)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9

# File: hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
skip_mps,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableUnCLIPImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = frozenset(
[]
    )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
image_latents_params = frozenset([])
def get_dummy_components(self):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# image encoding components
feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
torch.manual_seed(0)
image_encoder = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=embedder_hidden_size,
projection_dim=embedder_projection_dim,
num_hidden_layers=5,
num_attention_heads=4,
image_size=32,
intermediate_size=37,
patch_size=1,
)
)
# regular denoising components
torch.manual_seed(0)
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=embedder_hidden_size,
projection_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
)
torch.manual_seed(0)
unet = UNet2DConditionModel(
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
block_out_channels=(32, 64),
attention_head_dim=(2, 4),
class_embed_type="projection",
            # The class embeddings are the noise-augmented image embeddings,
            # i.e. the image embeddings concatenated with the noised embeddings of the same dimension.
projection_class_embeddings_input_dim=embedder_projection_dim * 2,
cross_attention_dim=embedder_hidden_size,
layers_per_block=1,
upcast_attention=True,
use_linear_projection=True,
)
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_schedule="scaled_linear",
beta_start=0.00085,
beta_end=0.012,
prediction_type="v_prediction",
set_alpha_to_one=False,
steps_offset=1,
)
torch.manual_seed(0)
vae = AutoencoderKL()
components = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def get_dummy_inputs(self, device, seed=0, pil_image=True):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
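        # When a PIL input is requested, rescale the tensor into [0, 1] via
        # x * 0.5 + 0.5 and convert it to a PIL image.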
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0, 1)
input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def test_image_embeds_none(self):
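        # Passing image_embeds=None should make the pipeline compute the embeddings
        # from the input image itself instead of erroring out.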
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs.update({"image_embeds": None})
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
# Overriding PipelineTesterMixin::test_attention_slicing_forward_pass
    # because GPU non-determinism requires a looser check.
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
# Overriding PipelineTesterMixin::test_inference_batch_single_identical
    # because non-determinism requires a looser check.
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@nightly
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip_l_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
)
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
        # Stable unCLIP will OOM when the integration tests are run on a V100,
        # so turn on memory savings.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(input_image, "anime turle", generator=generator, output_type="np")
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
def test_stable_unclip_h_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
)
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
        # Stable unCLIP will OOM when the integration tests are run on a V100,
        # so turn on memory savings.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(input_image, "anime turle", generator=generator, output_type="np")
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
input_image,
"anime turtle",
num_inference_steps=2,
output_type="np",
)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9

# File: hf_public_repos/diffusers/tests/pipelines/dit/test_dit.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = DiTPipeline
params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
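        # out_channels is 2x in_channels because DiT predicts both the noise and a
        # learned variance for every latent channel.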
transformer = Transformer2DModel(
sample_size=16,
num_layers=2,
patch_size=4,
attention_head_dim=8,
num_attention_heads=2,
in_channels=4,
out_channels=8,
attention_bias=True,
activation_fn="gelu-approximate",
num_embeds_ada_norm=1000,
norm_type="ada_norm_zero",
norm_elementwise_affine=False,
)
vae = AutoencoderKL()
scheduler = DDIMScheduler()
components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 16, 16, 3))
expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(max_diff, 1e-3)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@nightly
@require_torch_gpu
class DiTPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_dit_256(self):
generator = torch.manual_seed(0)
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.to("cuda")
words = ["vase", "umbrella", "white shark", "white wolf"]
ids = pipe.get_label_ids(words)
images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
for word, image in zip(words, images):
expected_image = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
)
            assert np.abs(expected_image - image).max() < 1e-2
def test_dit_512(self):
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
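        # Swap in the multistep DPM-Solver so that 25 sampling steps suffice at 512x512.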
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
words = ["vase", "umbrella"]
ids = pipe.get_label_ids(words)
generator = torch.manual_seed(0)
images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
for word, image in zip(words, images):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"/dit/{word}_512.npy"
)
            assert np.abs(expected_image - image).max() < 1e-1

# File: hf_public_repos/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py
import gc
import random
import traceback
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionModelWithProjection,
GPT2Tokenizer,
)
from diffusers import (
AutoencoderKL,
DPMSolverMultistepScheduler,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
# Will be run via run_test_in_subprocess
def _test_unidiffuser_compile(in_queue, out_queue, timeout):
error = None
try:
inputs = in_queue.get(timeout=timeout)
torch_device = inputs.pop("torch_device")
seed = inputs.pop("seed")
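        # Generator objects can't be pickled across the process boundary, so the
        # generator is recreated here from the seed.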
inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.unet.to(memory_format=torch.channels_last)
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
pipe.set_progress_bar_config(disable=None)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice - expected_slice).max() < 1e-1
except Exception:
error = f"{traceback.format_exc()}"
results = {"error": error}
out_queue.put(results, timeout=timeout)
out_queue.join()
class UniDiffuserPipelineFastTests(
PipelineTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
pipeline_class = UniDiffuserPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
# vae_latents, not latents, is the argument that corresponds to VAE latent inputs
image_latents_params = frozenset(["vae_latents"])
def get_dummy_components(self):
unet = UniDiffuserModel.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="unet",
)
scheduler = DPMSolverMultistepScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
solver_order=3,
)
vae = AutoencoderKL.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="vae",
)
text_encoder = CLIPTextModel.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_encoder",
)
clip_tokenizer = CLIPTokenizer.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="clip_tokenizer",
)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="image_encoder",
)
# From the Stable Diffusion Image Variation pipeline tests
clip_image_processor = CLIPImageProcessor(crop_size=32, size=32)
# image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip")
text_tokenizer = GPT2Tokenizer.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_tokenizer",
)
text_decoder = UniDiffuserTextDecoder.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_decoder",
)
components = {
"vae": vae,
"text_encoder": text_encoder,
"image_encoder": image_encoder,
"clip_image_processor": clip_image_processor,
"clip_tokenizer": clip_tokenizer,
"text_decoder": text_decoder,
"text_tokenizer": text_tokenizer,
"unet": unet,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
image = Image.fromarray(np.uint8(image)).convert("RGB")
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed)
# Hardcode the shapes for now.
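        # UniDiffuser diffuses three latent streams jointly: CLIP text latents
        # (77 tokens), VAE image latents, and a single pooled CLIP image embedding.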
prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def get_dummy_inputs_with_latents(self, device, seed=0):
# image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
# image = image.cpu().permute(0, 2, 3, 1)[0]
# image = Image.fromarray(np.uint8(image)).convert("RGB")
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg",
)
image = image.resize((32, 32))
latents = self.get_fixed_latents(device, seed=seed)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"prompt_latents": latents.get("prompt_latents"),
"vae_latents": latents.get("vae_latents"),
"clip_latents": latents.get("clip_latents"),
}
return inputs
def test_unidiffuser_default_joint_v0(self):
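        # "joint" mode samples an image and a caption together from the learned
        # joint distribution.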
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_joint_no_cfg_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
# Set guidance scale to 1.0 to turn off CFG
inputs["guidance_scale"] = 1.0
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_text2img_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_image_0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img'
unidiffuser_pipe.set_image_mode()
assert unidiffuser_pipe.mode == "img"
inputs = self.get_dummy_inputs(device)
# Delete prompt and image for unconditional ("marginal") text generation.
del inputs["prompt"]
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_text_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
        # Set mode to 'text'
unidiffuser_pipe.set_text_mode()
assert unidiffuser_pipe.mode == "text"
inputs = self.get_dummy_inputs(device)
# Delete prompt and image for unconditional ("marginal") text generation.
del inputs["prompt"]
del inputs["image"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_img2text_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_joint_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_text2img_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_img2text_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_text2img_multiple_images(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs(device)
# Delete image for text-conditioned image generation
del inputs["image"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
image = unidiffuser_pipe(**inputs).images
assert image.shape == (2, 32, 32, 3)
def test_unidiffuser_img2text_multiple_prompts(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
text = unidiffuser_pipe(**inputs).text
assert len(text) == 3
def test_unidiffuser_text2img_multiple_images_with_latents(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
image = unidiffuser_pipe(**inputs).images
assert image.shape == (2, 32, 32, 3)
def test_unidiffuser_img2text_multiple_prompts_with_latents(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
text = unidiffuser_pipe(**inputs).text
assert len(text) == 3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=2e-4)
@require_torch_gpu
def test_unidiffuser_default_joint_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = '" This This'
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@require_torch_gpu
def test_unidiffuser_default_text2img_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
        # Delete the image for text-conditioned image generation.
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
@require_torch_gpu
def test_unidiffuser_default_img2text_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
        # Delete the prompt for image-conditioned text generation.
del inputs["prompt"]
inputs["data_type"] = 1
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = '" This This'
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@nightly
@require_torch_gpu
class UniDiffuserPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0, generate_latents=False):
generator = torch.manual_seed(seed)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
"output_type": "numpy",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
for latent_name, latent_tensor in latents.items():
inputs[latent_name] = latent_tensor
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed)
# Hardcode the shapes for now.
prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
# Move latents onto desired device.
prompt_latents = prompt_latents.to(device)
vae_latents = vae_latents.to(device)
clip_latents = clip_latents.to(device)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def test_unidiffuser_default_joint_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
# inputs = self.get_dummy_inputs(device)
inputs = self.get_inputs(device=torch_device, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1
expected_text_prefix = "a living room"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
def test_unidiffuser_default_text2img_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_unidiffuser_default_img2text_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["prompt"]
sample = pipe(**inputs)
text = sample.text
expected_text_prefix = "An astronaut"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@unittest.skip(reason="Skip torch.compile test to speed up the slow test suite.")
@require_torch_2
def test_unidiffuser_compile(self, seed=0):
inputs = self.get_inputs(torch_device, seed=seed, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
# Can't pickle a Generator object
del inputs["generator"]
inputs["torch_device"] = torch_device
inputs["seed"] = seed
run_test_in_subprocess(test_case=self, target_func=_test_unidiffuser_compile, inputs=inputs)
@nightly
@require_torch_gpu
class UniDiffuserPipelineNightlyTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0, generate_latents=False):
generator = torch.manual_seed(seed)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
"output_type": "numpy",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
for latent_name, latent_tensor in latents.items():
inputs[latent_name] = latent_tensor
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed)
# Hardcode the shapes for now.
prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
# Move latents onto desired device.
prompt_latents = prompt_latents.to(device)
vae_latents = vae_latents.to(device)
clip_latents = clip_latents.to(device)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def test_unidiffuser_default_joint_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
# inputs = self.get_dummy_inputs(device)
inputs = self.get_inputs(device=torch_device, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1
expected_text_prefix = "a living room"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
def test_unidiffuser_default_text2img_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_unidiffuser_default_img2text_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["prompt"]
sample = pipe(**inputs)
text = sample.text
expected_text_prefix = "An astronaut"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix

# File: hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22ControlnetPipeline
params = ["image_embeds", "negative_image_embeds", "hint"]
batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance.
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
# create hint
hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_kandinsky_controlnet(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1e-1)
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=5e-4)
@nightly
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_controlnet(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
)
hint = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png"
)
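        # Convert the PIL hint to a (1, 3, H, W) float tensor in [0, 1], the format
        # passed as `hint` below.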
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
prompt = "A robot, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
hint=hint,
generator=generator,
num_inference_steps=100,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image, expected_image)

# File: hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from diffusers import (
KandinskyV22CombinedPipeline,
KandinskyV22Img2ImgCombinedPipeline,
KandinskyV22InpaintCombinedPipeline,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies
enable_full_determinism()
class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22CombinedPipeline
params = [
"prompt",
]
batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = True
callback_cfg_params = ["image_embds"]
def get_dummy_components(self):
dummy = Dummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
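        # The combined pipeline bundles the prior and the decoder; prior components
        # are passed under a `prior_` prefix alongside the decoder components.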
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(
{
"height": 64,
"width": 64,
}
)
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
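        # build the same pipeline three ways (plain .to(device), model CPU
        # offload, sequential CPU offload) and check that all three produce
        # matching outputs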
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
def test_model_cpu_offload_forward_pass(self):
super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)
def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-3)
def test_callback_inputs(self):
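        # skipped for the combined wrapper; callback handling is covered by the
        # standalone prior/decoder pipeline tests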
pass
def test_callback_cfg(self):
pass
class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22Img2ImgCombinedPipeline
params = ["prompt", "image"]
batch_params = ["prompt", "negative_prompt", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
    callback_cfg_params = ["image_embeds"]
def get_dummy_components(self):
dummy = Img2ImgDummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
dummy = Img2ImgDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
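        # the combined pipeline computes the image embeddings internally from
        # the prompt, so drop the precomputed embeds coming from the decoder dummies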
inputs.pop("image_embeds")
inputs.pop("negative_image_embeds")
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=2e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
def test_model_cpu_offload_forward_pass(self):
super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
    def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
def test_callback_inputs(self):
pass
def test_callback_cfg(self):
pass
class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22InpaintCombinedPipeline
params = ["prompt", "image", "mask_image"]
batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummy = InpaintDummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
dummy = InpaintDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
inputs.pop("image_embeds")
inputs.pop("negative_image_embeds")
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
def test_model_cpu_offload_forward_pass(self):
super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)
def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
def test_sequential_cpu_offload_forward_pass(self):
super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)
def test_callback_inputs(self):
pass
def test_callback_cfg(self):
pass
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22InpaintPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create mask
mask = np.zeros((64, 64), dtype=np.float32)
mask[:32, :32] = 1
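        # torch.Generator does not support the MPS backend, so fall back to
        # seeding the global RNG there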
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22InpaintPipeline
params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
callback_cfg_params = ["image_embeds", "masked_image", "mask_image"]
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_inpaint(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
def test_sequential_cpu_offload_forward_pass(self):
super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)
    # override the default test because the mask must be zeroed out as well to make sure the final latent is all zeros
def test_callback_inputs(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_test(pipe, i, t, callback_kwargs):
missing_callback_inputs = set()
for v in pipe._callback_tensor_inputs:
if v not in callback_kwargs:
missing_callback_inputs.add(v)
self.assertTrue(
len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
)
last_i = pipe.num_timesteps - 1
if i == last_i:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"])
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
inputs["callback_on_step_end"] = callback_inputs_test
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
inputs["output_type"] = "latent"
output = pipe(**inputs)[0]
assert output.abs().sum() == 0
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_inpaint(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1
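        # mask out the strip above the cat's head, i.e. the region where "a hat" gets painted in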
prompt = "a hat"
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22InpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
output = pipeline(
image=init_image,
mask_image=mask,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
height=768,
width=768,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22Img2ImgPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
ddim_config = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
scheduler = DDIMScheduler(**ddim_config)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
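        # strength=0.2 re-runs only the final 20% of the noise schedule on top
        # of the init image, so the output stays close to the input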
inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22Img2ImgPipeline
params = ["image_embeds", "negative_image_embeds", "image"]
batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
callback_cfg_params = ["image_embeds"]
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_img2img(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=2e-1)
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
prompt = "A red cartoon frog, 4k"
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
output = pipeline(
image=init_image,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
height=768,
width=768,
strength=0.2,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModelWithProjection(config)
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it doesn't
model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
image_size=224,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_channels=3,
num_hidden_layers=5,
patch_size=14,
)
model = CLIPVisionModelWithProjection(config)
return model
@property
def dummy_image_processor(self):
image_processor = CLIPImageProcessor(
crop_size=224,
do_center_crop=True,
do_normalize=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=224,
)
return image_processor
def get_dummy_components(self):
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
image_processor = self.dummy_image_processor
scheduler = UnCLIPScheduler(
variance_type="fixed_small_log",
prediction_type="sample",
num_train_timesteps=1000,
clip_sample=True,
clip_sample_range=10.0,
)
components = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22PriorPipeline
params = ["prompt"]
batch_params = ["prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"]
test_xformers_attention = False
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_prior(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
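        # the prior pipeline outputs CLIP image embeddings rather than images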
image = output.image_embeds
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -10:]
image_from_tuple_slice = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
expected_slice = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-3)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
    # override the default test because there is no "latent" output_type; use "pt" instead
def test_callback_inputs(self):
sig = inspect.signature(self.pipeline_class.__call__)
if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_test(pipe, i, t, callback_kwargs):
missing_callback_inputs = set()
for v in pipe._callback_tensor_inputs:
if v not in callback_kwargs:
missing_callback_inputs.add(v)
self.assertTrue(
len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
)
last_i = pipe.num_timesteps - 1
if i == last_i:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
inputs["callback_on_step_end"] = callback_inputs_test
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
inputs["num_inference_steps"] = 2
inputs["output_type"] = "pt"
output = pipe(**inputs)[0]
assert output.abs().sum() == 0
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22PriorEmb2EmbPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
params = ["image_embeds", "negative_image_embeds", "image", "hint"]
batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
ddim_config = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
scheduler = DDIMScheduler(**ddim_config)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create hint
hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def test_kandinsky_controlnet_img2img(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1.75e-3)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=2e-1)
@nightly
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_controlnet_img2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
init_image = init_image.resize((512, 512))
hint = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png"
)
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)
prompt = "A robot, 4k photo"
pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
image=init_image,
strength=0.85,
generator=generator,
negative_prompt="",
).to_tuple()
output = pipeline(
image=init_image,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
hint=hint,
generator=generator,
num_inference_steps=100,
height=512,
width=512,
strength=0.5,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22PriorEmb2EmbPipeline
params = ["prompt", "image"]
batch_params = ["prompt", "image"]
required_optional_params = [
"num_images_per_prompt",
"strength",
"generator",
"num_inference_steps",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModelWithProjection(config)
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it doesn't
model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
image_size=224,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_channels=3,
num_hidden_layers=5,
patch_size=14,
)
model = CLIPVisionModelWithProjection(config)
return model
@property
def dummy_image_processor(self):
image_processor = CLIPImageProcessor(
crop_size=224,
do_center_crop=True,
do_normalize=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=224,
)
return image_processor
def get_dummy_components(self):
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
image_processor = self.dummy_image_processor
scheduler = UnCLIPScheduler(
variance_type="fixed_small_log",
prediction_type="sample",
num_train_timesteps=1000,
clip_sample=True,
clip_sample_range=10.0,
)
components = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
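        # unlike the plain prior, the Emb2Emb variant also conditions on an
        # init image plus a strength value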
inputs = {
"prompt": "horse",
"image": init_image,
"strength": 0.5,
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_kandinsky_prior_emb2emb(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.image_embeds
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -10:]
image_from_tuple_slice = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
expected_slice = np.array(
[
0.1071284,
1.3330271,
0.61260223,
-0.6691065,
-0.3846852,
-1.0303661,
0.22716111,
0.03348901,
0.30040675,
-0.24805029,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-2)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
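# These fast tests exercise the Kandinsky 2.2 text-to-image decoder, which
# consumes image embeddings produced by the prior pipeline.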
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22Pipeline
params = [
"image_embeds",
"negative_image_embeds",
]
batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    callback_cfg_params = ["image_embeds"]
test_xformers_attention = False
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=1e-1)
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_text2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
)
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22Pipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
prompt = "red cat, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/animatediff/test_animatediff.py
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AnimateDiffPipeline,
AutoencoderKL,
DDIMScheduler,
MotionAdapter,
UNet2DConditionModel,
UNetMotionModel,
)
from diffusers.utils import logging
from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
def to_np(tensor):
if isinstance(tensor, torch.Tensor):
tensor = tensor.detach().cpu().numpy()
return tensor
class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = AnimateDiffPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="linear",
clip_sample=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
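        # the motion adapter provides the temporal layers that
        # AnimateDiffPipeline merges into the 2D UNet to build a UNetMotionModel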
motion_adapter = MotionAdapter(
block_out_channels=(32, 64),
motion_layers_per_block=2,
motion_norm_num_groups=2,
motion_num_attention_heads=4,
)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"motion_adapter": motion_adapter,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "pt",
}
return inputs
def test_motion_unet_loading(self):
components = self.get_dummy_components()
pipe = AnimateDiffPipeline(**components)
assert isinstance(pipe.unet, UNetMotionModel)
@unittest.skip("Attention slicing is not enabled in this pipeline")
def test_attention_slicing_forward_pass(self):
pass
def test_inference_batch_single_identical(
self,
batch_size=2,
expected_max_diff=1e-4,
additional_params_copy_to_batched_inputs=["num_inference_steps"],
):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
for components in pipe.components.values():
if hasattr(components, "set_default_attn_processor"):
components.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
        # Reset the generator in case it has been used in self.get_dummy_inputs
inputs["generator"] = self.get_generator(0)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
batched_inputs = {}
batched_inputs.update(inputs)
for name in self.batch_params:
if name not in inputs:
continue
value = inputs[name]
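            # Vary prompt lengths across the batch (the last one very long) to exercise padding.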
if name == "prompt":
len_prompt = len(value)
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
batched_inputs[name][-1] = 100 * "very long"
else:
batched_inputs[name] = batch_size * [value]
if "generator" in inputs:
batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)]
if "batch_size" in inputs:
batched_inputs["batch_size"] = batch_size
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
output = pipe(**inputs)
output_batch = pipe(**batched_inputs)
assert output_batch[0].shape[0] == batch_size
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
assert max_diff < expected_max_diff
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
def test_to_device(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to("cpu")
        # The pipeline creates a new motion UNet under the hood, so check devices via pipe.components.
model_devices = [
component.device.type for component in pipe.components.values() if hasattr(component, "device")
]
self.assertTrue(all(device == "cpu" for device in model_devices))
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
self.assertTrue(np.isnan(output_cpu).sum() == 0)
pipe.to("cuda")
model_devices = [
component.device.type for component in pipe.components.values() if hasattr(component, "device")
]
self.assertTrue(all(device == "cuda" for device in model_devices))
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
def test_to_dtype(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
        # The pipeline creates a new motion UNet under the hood, so check dtypes via pipe.components.
model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))
pipe.to(torch_dtype=torch.float16)
model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))
def test_prompt_embeds(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
inputs.pop("prompt")
inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device)
pipe(**inputs)
@slow
@require_torch_gpu
class AnimateDiffPipelineSlowTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_animatediff(self):
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter)
pipe = pipe.to(torch_device)
pipe.scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="linear",
steps_offset=1,
clip_sample=False,
)
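        # Reduce peak VRAM: decode the VAE in slices and offload idle sub-models to the CPU.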
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain"
negative_prompt = "bad quality, worse quality"
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(
prompt,
negative_prompt=negative_prompt,
num_frames=16,
generator=generator,
guidance_scale=7.5,
num_inference_steps=3,
output_type="np",
)
image = output.frames[0]
assert image.shape == (16, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.11357737,
0.11285847,
0.11180121,
0.11084166,
0.11414117,
0.09785956,
0.10742754,
0.10510018,
0.08045256,
]
)
assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies
enable_full_determinism()
class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyCombinedPipeline
params = [
"prompt",
]
batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = True
def get_dummy_components(self):
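        # Merge the base text2img components with the prior's components under a "prior_" prefix.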
dummy = Dummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(
{
"height": 64,
"width": 64,
}
)
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
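        # Run the pipeline fully on-device, with model CPU offload, and with sequential CPU
        # offload; all three variants should produce nearly identical image slices.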
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=2e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyImg2ImgCombinedPipeline
params = ["prompt", "image"]
batch_params = ["prompt", "negative_prompt", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummy = Img2ImgDummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
dummy = Img2ImgDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
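        # The combined pipeline computes image embeddings with its own prior, so drop the precomputed ones.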
inputs.pop("image_embeds")
inputs.pop("negative_image_embeds")
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyInpaintCombinedPipeline
params = ["prompt", "image", "mask_image"]
batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummy = InpaintDummies()
prior_dummy = PriorDummies()
components = dummy.get_dummy_components()
components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
return components
def get_dummy_inputs(self, device, seed=0):
prior_dummy = PriorDummies()
dummy = InpaintDummies()
inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
inputs.pop("image_embeds")
inputs.pop("negative_image_embeds")
return inputs
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
def test_save_load_local(self):
super().test_save_load_local(expected_max_difference=5e-3)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_tokenizer(self):
tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = MCLIPConfig(
numDims=self.cross_attention_dim,
transformerDimensions=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=5,
vocab_size=1005,
)
text_encoder = MultilingualCLIP(config)
text_encoder = text_encoder.eval()
return text_encoder
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance.
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create a binary mask covering the top-left 32x32 quadrant
mask = np.zeros((64, 64), dtype=np.float32)
mask[:32, :32] = 1
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyInpaintPipeline
params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_inpaint(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
@nightly
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_inpaint(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1
prompt = "a hat"
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
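        # The prior returns embeddings for the prompt and for the empty negative prompt.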
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
output = pipeline(
prompt,
image=init_image,
mask_image=mask,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
height=768,
width=768,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_img2img.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import (
DDIMScheduler,
DDPMScheduler,
KandinskyImg2ImgPipeline,
KandinskyPriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_tokenizer(self):
tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = MCLIPConfig(
numDims=self.cross_attention_dim,
transformerDimensions=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=5,
vocab_size=1005,
)
text_encoder = MultilingualCLIP(config)
text_encoder = text_encoder.eval()
return text_encoder
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts both mean and variance.
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
unet = self.dummy_unet
movq = self.dummy_movq
ddim_config = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
scheduler = DDIMScheduler(**ddim_config)
components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyImg2ImgPipeline
params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_img2img(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
prompt = "A red cartoon frog, 4k"
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyImg2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
output = pipeline(
prompt,
image=init_image,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
height=768,
width=768,
strength=0.2,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
@nightly
@require_torch_gpu
class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img_ddpm(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_ddpm_frog.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/frog.png"
)
prompt = "A red cartoon frog, 4k"
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler")
pipeline = KandinskyImg2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
output = pipeline(
prompt,
image=init_image,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
height=768,
width=768,
strength=0.2,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky_prior.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 100
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModelWithProjection(config)
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 to avoid that.
model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def dummy_image_encoder(self):
torch.manual_seed(0)
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,
image_size=224,
projection_dim=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_channels=3,
num_hidden_layers=5,
patch_size=14,
)
model = CLIPVisionModelWithProjection(config)
return model
@property
def dummy_image_processor(self):
image_processor = CLIPImageProcessor(
crop_size=224,
do_center_crop=True,
do_normalize=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=224,
)
return image_processor
def get_dummy_components(self):
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
image_processor = self.dummy_image_processor
scheduler = UnCLIPScheduler(
variance_type="fixed_small_log",
prediction_type="sample",
num_train_timesteps=1000,
clip_sample=True,
clip_sample_range=10.0,
)
components = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyPriorPipeline
params = ["prompt"]
batch_params = ["prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
def get_dummy_components(self):
dummy = Dummies()
return dummy.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummy = Dummies()
return dummy.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_prior(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.image_embeds
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -10:]
image_from_tuple_slice = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
expected_slice = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-2)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_tokenizer(self):
tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = MCLIPConfig(
numDims=self.cross_attention_dim,
transformerDimensions=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=5,
vocab_size=1005,
)
text_encoder = MultilingualCLIP(config)
text_encoder = text_encoder.eval()
return text_encoder
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts both mean and variance.
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyPipeline
params = [
"prompt",
"image_embeds",
"negative_image_embeds",
]
batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
test_xformers_attention = False
def get_dummy_components(self):
dummy = Dummies()
return dummy.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummy = Dummies()
return dummy.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
@slow
@require_torch_gpu
class KandinskyPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_text2img(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_text2img_cat_fp16.npy"
)
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
prompt = "red cat, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=5,
negative_prompt="",
).to_tuple()
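        # Seed a fresh generator for the decoder pass so it starts from a known RNG state.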
generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
prompt,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=100,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(image, expected_image)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import DDPMWuerstchenScheduler, WuerstchenPriorPipeline
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import (
LoRAAttnProcessor,
LoRAAttnProcessor2_0,
)
from diffusers.pipelines.wuerstchen import WuerstchenPrior
from diffusers.utils.import_utils import is_peft_available
from diffusers.utils.testing_utils import enable_full_determinism, require_peft_backend, skip_mps, torch_device
if is_peft_available():
from peft import LoraConfig
from peft.tuners.tuners_utils import BaseTunerLayer
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
def create_prior_lora_layers(unet: nn.Module):
lora_attn_procs = {}
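    # Use the PyTorch 2.0 LoRA attention processor when scaled_dot_product_attention is available.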
for name in unet.attn_processors.keys():
lora_attn_processor_class = (
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
)
lora_attn_procs[name] = lora_attn_processor_class(
hidden_size=unet.config.c,
)
unet_lora_layers = AttnProcsLayers(lora_attn_procs)
return lora_attn_procs, unet_lora_layers
class WuerstchenPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WuerstchenPriorPipeline
params = ["prompt"]
batch_params = ["prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
callback_cfg_params = ["text_encoder_hidden_states"]
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config).eval()
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {
"c_in": 2,
"c": 8,
"depth": 2,
"c_cond": 32,
"c_r": 8,
"nhead": 2,
}
model = WuerstchenPrior(**model_kwargs)
return model.eval()
def get_dummy_components(self):
prior = self.dummy_prior
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
scheduler = DDPMWuerstchenScheduler()
components = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_wuerstchen_prior(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.image_embeddings
image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
image_slice = image[0, 0, 0, -10:]
image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:]
assert image.shape == (1, 2, 24, 24)
expected_slice = np.array(
[
-7172.837,
-3438.855,
-1093.312,
388.8835,
-7471.467,
-7998.1206,
-5328.259,
218.00089,
-2731.5745,
-8056.734,
]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=2e-1,
)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
@unittest.skip(reason="flaky for now")
def test_float16_inference(self):
super().test_float16_inference()
    # Override: latent_mean and latent_std must be 0 so the zeroed latents yield an all-zero output.
def test_callback_inputs(self):
components = self.get_dummy_components()
components["latent_mean"] = 0
components["latent_std"] = 0
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_test(pipe, i, t, callback_kwargs):
missing_callback_inputs = set()
for v in pipe._callback_tensor_inputs:
if v not in callback_kwargs:
missing_callback_inputs.add(v)
self.assertTrue(
len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
)
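            # Zero the latents on the final step; the test asserts the final output is all zeros.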
last_i = pipe.num_timesteps - 1
if i == last_i:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
inputs["callback_on_step_end"] = callback_inputs_test
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
inputs["output_type"] = "latent"
output = pipe(**inputs)[0]
assert output.abs().sum() == 0
def check_if_lora_correctly_set(self, model) -> bool:
"""
Checks if the LoRA layers are correctly set with peft
"""
for module in model.modules():
if isinstance(module, BaseTunerLayer):
return True
return False
def get_lora_components(self):
prior = self.dummy_prior
prior_lora_config = LoraConfig(
r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False
)
prior_lora_attn_procs, prior_lora_layers = create_prior_lora_layers(prior)
lora_components = {
"prior_lora_layers": prior_lora_layers,
"prior_lora_attn_procs": prior_lora_attn_procs,
}
return prior, prior_lora_config, lora_components
@require_peft_backend
def test_inference_with_prior_lora(self):
_, prior_lora_config, _ = self.get_lora_components()
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output_no_lora = pipe(**self.get_dummy_inputs(device))
image_embed = output_no_lora.image_embeddings
self.assertTrue(image_embed.shape == (1, 2, 24, 24))
pipe.prior.add_adapter(prior_lora_config)
self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior")
output_lora = pipe(**self.get_dummy_inputs(device))
lora_image_embed = output_lora.image_embeddings
self.assertTrue(image_embed.shape == lora_image_embed.shape)
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import DDPMWuerstchenScheduler, WuerstchenCombinedPipeline
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class WuerstchenCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WuerstchenCombinedPipeline
params = ["prompt"]
batch_params = ["prompt", "negative_prompt"]
required_optional_params = [
"generator",
"height",
"width",
"latents",
"prior_guidance_scale",
"decoder_guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"prior_num_inference_steps",
"output_type",
"return_dict",
]
test_xformers_attention = True
@property
def text_embedder_hidden_size(self):
return 32
@property
def dummy_prior(self):
torch.manual_seed(0)
model_kwargs = {"c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2}
model = WuerstchenPrior(**model_kwargs)
return model.eval()
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_prior_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config).eval()
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
projection_dim=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config).eval()
@property
def dummy_vqgan(self):
torch.manual_seed(0)
model_kwargs = {
"bottleneck_blocks": 1,
"num_vq_embeddings": 2,
}
model = PaellaVQModel(**model_kwargs)
return model.eval()
@property
def dummy_decoder(self):
torch.manual_seed(0)
model_kwargs = {
"c_cond": self.text_embedder_hidden_size,
"c_hidden": [320],
"nhead": [-1],
"blocks": [4],
"level_config": ["CT"],
"clip_embd": self.text_embedder_hidden_size,
"inject_effnet": [False],
}
model = WuerstchenDiffNeXt(**model_kwargs)
return model.eval()
def get_dummy_components(self):
prior = self.dummy_prior
prior_text_encoder = self.dummy_prior_text_encoder
scheduler = DDPMWuerstchenScheduler()
tokenizer = self.dummy_tokenizer
text_encoder = self.dummy_text_encoder
decoder = self.dummy_decoder
vqgan = self.dummy_vqgan
components = {
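            # Keys prefixed with "prior_" configure the prior stage of the combined
            # pipeline; the remaining components configure the decoder stage.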
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"decoder": decoder,
"vqgan": vqgan,
"scheduler": scheduler,
"prior_prior": prior,
"prior_text_encoder": prior_text_encoder,
"prior_tokenizer": tokenizer,
"prior_scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "horse",
"generator": generator,
"prior_guidance_scale": 4.0,
"decoder_guidance_scale": 4.0,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "np",
"height": 128,
"width": 128,
}
return inputs
def test_wuerstchen(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[-3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@require_torch_gpu
def test_offloads(self):
pipes = []
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components).to(torch_device)
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_sequential_cpu_offload()
pipes.append(sd_pipe)
components = self.get_dummy_components()
sd_pipe = self.pipeline_class(**components)
sd_pipe.enable_model_cpu_offload()
pipes.append(sd_pipe)
image_slices = []
for pipe in pipes:
inputs = self.get_dummy_inputs(torch_device)
image = pipe(**inputs).images
image_slices.append(image[0, -3:, -3:, -1].flatten())
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=1e-2)
@unittest.skip(reason="flakey and float16 requires CUDA")
def test_float16_inference(self):
super().test_float16_inference()
def test_callback_inputs(self):
pass
def test_callback_cfg(self):
pass
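# Illustrative usage sketch (assumes the "warp-ai/wuerstchen" Hub checkpoint; not
# executed by this test suite):
#
#   pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   image = pipe("an astronaut riding a horse", prior_guidance_scale=4.0).images[0]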
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import DDPMWuerstchenScheduler, WuerstchenDecoderPipeline
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WuerstchenDecoderPipeline
params = ["prompt"]
batch_params = ["image_embeddings", "prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"]
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
projection_dim=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config).eval()
@property
def dummy_vqgan(self):
torch.manual_seed(0)
model_kwargs = {
"bottleneck_blocks": 1,
"num_vq_embeddings": 2,
}
model = PaellaVQModel(**model_kwargs)
return model.eval()
@property
def dummy_decoder(self):
torch.manual_seed(0)
model_kwargs = {
"c_cond": self.text_embedder_hidden_size,
"c_hidden": [320],
"nhead": [-1],
"blocks": [4],
"level_config": ["CT"],
"clip_embd": self.text_embedder_hidden_size,
"inject_effnet": [False],
}
model = WuerstchenDiffNeXt(**model_kwargs)
return model.eval()
def get_dummy_components(self):
decoder = self.dummy_decoder
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
vqgan = self.dummy_vqgan
scheduler = DDPMWuerstchenScheduler()
components = {
"decoder": decoder,
"vqgan": vqgan,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"latent_dim_scale": 4.0,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image_embeddings": torch.ones((1, 4, 4, 4), device=device),
"prompt": "horse",
"generator": generator,
"guidance_scale": 1.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_wuerstchen_decoder(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.0000, 0.0000, 0.0089, 1.0000, 1.0000, 0.3927, 1.0000, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-5)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
@unittest.skip(reason="bf16 not supported and requires CUDA")
def test_float16_inference(self):
super().test_float16_inference()
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
TextToVideoSDPipeline,
UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = TextToVideoSDPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet3DConditionModel(
block_out_channels=(4, 8),
layers_per_block=1,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
cross_attention_dim=4,
attention_head_dim=4,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=(8,),
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D"],
latent_channels=4,
sample_size=32,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=4,
intermediate_size=16,
layer_norm_eps=1e-05,
num_attention_heads=2,
num_hidden_layers=2,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def test_text_to_video_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = TextToVideoSDPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["output_type"] = "np"
frames = sd_pipe(**inputs).frames
image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
expected_slice = np.array([192.0, 44.0, 157.0, 140.0, 108.0, 104.0, 123.0, 144.0, 129.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.")
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
pass
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_single_identical(self):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_num_images_per_prompt(self):
pass
def test_progress_bar(self):
return super().test_progress_bar()
@slow
@skip_mps
@require_torch_gpu
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
def test_two_step_model(self):
expected_video = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
)
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to(torch_device)
prompt = "Spiderman is surfing"
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
video = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def test_two_step_model_with_freeu(self):
pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to(torch_device)
prompt = "Spiderman is surfing"
generator = torch.Generator(device="cpu").manual_seed(0)
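        # FreeU rescales the UNet's backbone features (b1, b2) and skip-connection
        # features (s1, s2) at inference time only; no weights are modified.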
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
video = video_frames.cpu().numpy()
video = video[0, 0, -3:, -3:, -1].flatten()
expected_video = [-0.3102, -0.2477, -0.1772, -0.648, -0.6176, -0.5484, -0.0217, -0.056, -0.0177]
assert np.abs(expected_video - video).mean() < 5e-2
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import DDIMScheduler, TextToVideoZeroPipeline
from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu
from ..test_pipelines_common import assert_mean_pixel_difference
@nightly
@require_torch_gpu
class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
def test_full_model(self):
model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(0)
prompt = "A bear is playing a guitar on Times Square"
result = pipe(prompt=prompt, generator=generator).images
expected_result = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/A bear is playing a guitar on Times Square.pt"
)
assert_mean_pixel_difference(result, expected_result)
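    # Note: Text2Video-Zero reuses a plain text-to-image checkpoint; temporal
    # consistency comes from cross-frame attention and latent motion dynamics
    # applied at inference time, so no video-specific weights are loaded.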
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
is_flaky,
nightly,
numpy_cosine_similarity_distance,
skip_mps,
torch_device,
)
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = VideoToVideoSDPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
test_attention_slicing = False
# No `output_type`.
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet3DConditionModel(
block_out_channels=(4, 8),
layers_per_block=1,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
cross_attention_dim=32,
attention_head_dim=4,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=True,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[
8,
],
in_channels=3,
out_channels=3,
down_block_types=[
"DownEncoderBlock2D",
],
up_block_types=["UpDecoderBlock2D"],
latent_channels=4,
sample_size=32,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=512,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
# 3 frames
video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def test_text_to_video_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = VideoToVideoSDPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["output_type"] = "np"
frames = sd_pipe(**inputs).frames
image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
expected_slice = np.array([162.0, 136.0, 132.0, 140.0, 139.0, 137.0, 169.0, 134.0, 132.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@is_flaky()
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=0.001)
@is_flaky()
def test_dict_tuple_outputs_equivalent(self):
super().test_dict_tuple_outputs_equivalent()
@is_flaky()
def test_save_load_local(self):
super().test_save_load_local()
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
pass
# (todo): sayakpaul
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_single_identical(self):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_num_images_per_prompt(self):
pass
def test_progress_bar(self):
return super().test_progress_bar()
@nightly
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
def test_two_step_model(self):
pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
# 10 frames
generator = torch.Generator(device="cpu").manual_seed(0)
video = torch.randn((1, 10, 3, 320, 576), generator=generator)
prompt = "Spiderman is surfing"
video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
expected_array = np.array([-0.9770508, -0.8027344, -0.62646484, -0.8334961, -0.7573242])
output_array = video_frames.cpu().numpy()[0, 0, 0, 0, -5:]
assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-2
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/consistency_models/test_consistency_models.py
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
nightly,
require_torch_2,
require_torch_gpu,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = ConsistencyModelPipeline
params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
]
)
@property
def dummy_uncond_unet(self):
unet = UNet2DModel.from_pretrained(
"diffusers/consistency-models-test",
subfolder="test_unet",
)
return unet
@property
def dummy_cond_unet(self):
unet = UNet2DModel.from_pretrained(
"diffusers/consistency-models-test",
subfolder="test_unet_class_cond",
)
return unet
def get_dummy_components(self, class_cond=False):
if class_cond:
unet = self.dummy_cond_unet
else:
unet = self.dummy_uncond_unet
# Default to CM multistep sampler
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
components = {
"unet": unet,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def test_consistency_model_pipeline_multistep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_multistep_class_cond(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True)
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["class_labels"] = 0
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_onestep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_onestep_class_cond(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True)
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
inputs["class_labels"] = 0
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@nightly
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
generator = torch.manual_seed(seed)
inputs = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
inputs["latents"] = latents
return inputs
def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
if isinstance(device, str):
device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed)
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
return latents
def test_consistency_model_cd_multistep(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0146, 0.0158, 0.0092, 0.0086, 0.0000, 0.0000, 0.0000, 0.0000, 0.0058])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_cd_onestep(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0059, 0.0003, 0.0000, 0.0023, 0.0052, 0.0007, 0.0165, 0.0081, 0.0095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_2
def test_consistency_model_cd_multistep_flash_attn(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_2
def test_consistency_model_cd_onestep_flash_attn(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1623, 0.2009, 0.2387, 0.1731, 0.1168, 0.1202, 0.2031, 0.1327, 0.2447])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, torch_device
class SafeDiffusionPipelineFastTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_cond_unet(self):
torch.manual_seed(0)
model = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
return model
@property
def dummy_vae(self):
torch.manual_seed(0)
model = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
return model
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config)
@property
def dummy_extractor(self):
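        # Minimal stand-in for a CLIP feature extractor: returns an object exposing
        # an empty pixel_values tensor and a chainable .to(device).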
def extract(*args, **kwargs):
class Out:
def __init__(self):
self.pixel_values = torch.ones([0])
def to(self, device):
                    # torch.Tensor.to is not in-place; keep the moved tensor
                    self.pixel_values = self.pixel_values.to(device)
return self
return Out()
return extract
def test_safe_diffusion_ddim(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=device).manual_seed(0)
output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
image = output.images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = sd_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_pndm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
# make sure here that pndm scheduler skips prk
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=device).manual_seed(0)
output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
image = output.images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = sd_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_no_safety_checker(self):
pipe = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
)
assert isinstance(pipe, StableDiffusionPipeline)
assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
assert pipe.safety_checker is None
image = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
image = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
def test_stable_diffusion_fp16(self):
"""Test that stable diffusion works with fp16"""
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
# put models in fp16
unet = unet.half()
vae = vae.half()
bert = bert.half()
# make sure here that pndm scheduler skips prk
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_harm_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
seed = 4003660346
guidance_scale = 7
# without safety guidance (sld_guidance_scale = 0)
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=0,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=2000,
sld_warmup_steps=7,
sld_threshold=0.025,
sld_momentum_scale=0.5,
sld_mom_beta=0.7,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_nudity_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
seed = 2734971755
guidance_scale = 7
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=0,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=2000,
sld_warmup_steps=7,
sld_threshold=0.025,
sld_momentum_scale=0.5,
sld_mom_beta=0.7,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_nudity_safetychecker_safe_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
seed = 1044355234
guidance_scale = 12
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=0,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
generator = torch.manual_seed(seed)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
sld_guidance_scale=2000,
sld_warmup_steps=7,
sld_threshold=0.025,
sld_momentum_scale=0.5,
sld_mom_beta=0.7,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_cond_unet(self):
torch.manual_seed(0)
model = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
return model
@property
def dummy_vae(self):
torch.manual_seed(0)
model = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
return model
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = RobertaSeriesConfig(
hidden_size=32,
project_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=5006,
)
return RobertaSeriesModelWithTransformation(config)
@property
def dummy_extractor(self):
def extract(*args, **kwargs):
class Out:
def __init__(self):
self.pixel_values = torch.ones([0])
def to(self, device):
                    # torch.Tensor.to is not in-place; keep the moved tensor
                    self.pixel_values = self.pixel_values.to(device)
return self
return Out()
return extract
def test_stable_diffusion_img2img_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
tokenizer.model_max_length = 77
init_image = self.dummy_image.to(device)
init_image = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
alt_pipe = AltDiffusionImg2ImgPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
image_encoder=None,
)
alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=True)
alt_pipe = alt_pipe.to(device)
alt_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=device).manual_seed(0)
output = alt_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
image=init_image,
)
image = output.images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = alt_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
image=init_image,
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
def test_stable_diffusion_img2img_fp16(self):
"""Test that stable diffusion img2img works with fp16"""
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
tokenizer.model_max_length = 77
init_image = self.dummy_image.to(torch_device)
# put models in fp16
unet = unet.half()
vae = vae.half()
bert = bert.half()
# make sure here that pndm scheduler skips prk
alt_pipe = AltDiffusionImg2ImgPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
image_encoder=None,
)
alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
alt_pipe = alt_pipe.to(torch_device)
alt_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
image = alt_pipe(
[prompt],
generator=generator,
num_inference_steps=2,
output_type="np",
image=init_image,
).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg"
)
# resize to resolution that is divisible by 8 but not 16 or 32
init_image = init_image.resize((760, 504))
model_id = "BAAI/AltDiffusion"
pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
model_id,
safety_checker=None,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "A fantasy landscape, trending on artstation"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
strength=0.75,
guidance_scale=7.5,
generator=generator,
output_type="np",
)
image = output.images[0]
image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@nightly
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_img2img_pipeline_default(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg"
)
init_image = init_image.resize((768, 512))
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
)
model_id = "BAAI/AltDiffusion"
pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
model_id,
safety_checker=None,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "A fantasy landscape, trending on artstation"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
strength=0.75,
guidance_scale=7.5,
generator=generator,
output_type="np",
)
image = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so use a loose max-abs-error tolerance
assert np.abs(expected_image - image).max() < 1e-2
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/altdiffusion/test_alt_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from ..pipeline_params import (
TEXT_TO_IMAGE_BATCH_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
TEXT_TO_IMAGE_IMAGE_PARAMS,
TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = AltDiffusionPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
projection_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=5002,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
tokenizer.model_max_length = 77
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
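# torch.Generator is not supported on mps devices, so fall back to a globally seeded generator there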
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def test_attention_slicing_forward_pass(self):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def test_alt_diffusion_ddim(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
torch.manual_seed(0)
text_encoder_config = RobertaSeriesConfig(
hidden_size=32,
project_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
vocab_size=5002,
)
# TODO: remove after fixing the non-deterministic text encoder
text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
components["text_encoder"] = text_encoder
alt_pipe = AltDiffusionPipeline(**components)
alt_pipe = alt_pipe.to(device)
alt_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["prompt"] = "A photo of an astronaut"
output = alt_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_alt_diffusion_pndm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
text_encoder_config = RobertaSeriesConfig(
hidden_size=32,
project_dim=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
vocab_size=5002,
)
# TODO: remove after fixing the non-deterministic text encoder
text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
components["text_encoder"] = text_encoder
alt_pipe = AltDiffusionPipeline(**components)
alt_pipe = alt_pipe.to(device)
alt_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = alt_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@nightly
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_alt_diffusion(self):
# make sure here that pndm scheduler skips prk
alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
alt_pipe = alt_pipe.to(torch_device)
alt_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_alt_diffusion_fast_ddim(self):
scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
alt_pipe = alt_pipe.to(torch_device)
alt_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 0
|
hf_public_repos/diffusers/tests/pipelines
|
hf_public_repos/diffusers/tests/pipelines/blipdiffusion/test_blipdiffusion.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTokenizer
from transformers.models.blip_2.configuration_blip_2 import Blip2Config
from transformers.models.clip.configuration_clip import CLIPTextConfig
from diffusers import AutoencoderKL, BlipDiffusionPipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.utils.testing_utils import enable_full_determinism
from diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor
from diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel
from diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class BlipDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = BlipDiffusionPipeline
params = [
"prompt",
"reference_image",
"source_subject_category",
"target_subject_category",
]
batch_params = [
"prompt",
"reference_image",
"source_subject_category",
"target_subject_category",
]
required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"neg_prompt",
"prompt_strength",
"prompt_reps",
]
def get_dummy_components(self):
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
vocab_size=1000,
hidden_size=16,
intermediate_size=16,
projection_dim=16,
num_hidden_layers=1,
num_attention_heads=1,
max_position_embeddings=77,
)
text_encoder = ContextCLIPTextModel(text_encoder_config)
vae = AutoencoderKL(
in_channels=4,
out_channels=4,
down_block_types=("DownEncoderBlock2D",),
up_block_types=("UpDecoderBlock2D",),
block_out_channels=(32,),
layers_per_block=1,
act_fn="silu",
latent_channels=4,
norm_num_groups=16,
sample_size=16,
)
blip_vision_config = {
"hidden_size": 16,
"intermediate_size": 16,
"num_hidden_layers": 1,
"num_attention_heads": 1,
"image_size": 224,
"patch_size": 14,
"hidden_act": "quick_gelu",
}
blip_qformer_config = {
"vocab_size": 1000,
"hidden_size": 16,
"num_hidden_layers": 1,
"num_attention_heads": 1,
"intermediate_size": 16,
"max_position_embeddings": 512,
"cross_attention_frequency": 1,
"encoder_hidden_size": 16,
}
qformer_config = Blip2Config(
vision_config=blip_vision_config,
qformer_config=blip_qformer_config,
num_query_tokens=16,
tokenizer="hf-internal-testing/tiny-random-bert",
)
qformer = Blip2QFormerModel(qformer_config)
unet = UNet2DConditionModel(
block_out_channels=(16, 32),
norm_num_groups=16,
layers_per_block=1,
sample_size=16,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=16,
)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
scheduler = PNDMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
set_alpha_to_one=False,
skip_prk_steps=True,
)
vae.eval()
qformer.eval()
text_encoder.eval()
image_processor = BlipImageProcessor()
components = {
"text_encoder": text_encoder,
"vae": vae,
"qformer": qformer,
"unet": unet,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def get_dummy_inputs(self, device, seed=0):
np.random.seed(seed)
reference_image = np.random.rand(32, 32, 3) * 255
reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA")
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "swimming underwater",
"generator": generator,
"reference_image": reference_image,
"source_subject_category": "dog",
"target_subject_category": "dog",
"height": 32,
"width": 32,
"guidance_scale": 7.5,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_blipdiffusion(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
image = pipe(**self.get_dummy_inputs(device))[0]
image_slice = image[0, -3:, -3:, 0]
assert image.shape == (1, 16, 16, 4)
expected_slice = np.array([0.7096, 0.5900, 0.6703, 0.4032, 0.7766, 0.3629, 0.5447, 0.4149, 0.8172])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_unclip.py
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
# UnCLIPScheduler is a modified DDPMScheduler with a subset of the configuration.
class UnCLIPSchedulerTest(SchedulerCommonTest):
scheduler_classes = (UnCLIPScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_variance_type(self):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=variance)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_clip_sample_range(self):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=clip_sample_range)
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
def test_variance_fixed_small_log(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
def test_variance_learned_range(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(variance_type="learned_range")
scheduler = scheduler_class(**scheduler_config)
predicted_variance = 0.5
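# for learned_range the scheduler interpolates between the log of the fixed-small variance and log(beta_t),
# so _get_variance returns a (negative) log variance, which the expected values below reflect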
assert abs(scheduler._get_variance(1, predicted_variance=predicted_variance) - (-10.1712790)) < 1e-5
assert abs(scheduler._get_variance(487, predicted_variance=predicted_variance) - (-5.7998052)) < 1e-5
assert abs(scheduler._get_variance(999, predicted_variance=predicted_variance) - (-0.0010011)) < 1e-5
def test_full_loop(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for i, t in enumerate(timesteps):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 252.2682495) < 1e-2
assert abs(result_mean.item() - 0.3284743) < 1e-3
def test_full_loop_skip_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(25)
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for i, t in enumerate(timesteps):
# 1. predict noise residual
residual = model(sample, t)
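# the schedule skips steps, so the next timestep cannot be inferred from t alone; pass it explicitly,
# with None marking the final denoising step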
if i + 1 == timesteps.shape[0]:
prev_timestep = None
else:
prev_timestep = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(
residual, t, sample, prev_timestep=prev_timestep, generator=generator
).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 258.2044983) < 1e-2
assert abs(result_mean.item() - 0.3362038) < 1e-3
def test_trained_betas(self):
pass
def test_add_noise_device(self):
pass
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_kdpm2_discrete.py
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (KDPM2DiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07) < 1e-2
assert abs(result_mean.item() - 6.1112e-10) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_no_noise(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
def test_full_loop_device(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if str(torch_device).startswith("cpu"):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
def test_full_loop_with_noise(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
# add noise
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
noise = noise.to(sample.device)
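# KDPM2 is a second-order scheduler (scheduler.order == 2), so each denoising step spans two entries
# in scheduler.timesteps; slice accordingly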
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 70408.4062) < 1e-2, f" expected result sum 70408.4062, but get {result_sum}"
assert abs(result_mean.item() - 91.6776) < 1e-3, f" expected result mean 91.6776, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_deis.py
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DEISMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
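# multistep solvers keep a history of recent model outputs; pre-populating it lets step() be called mid-schedule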
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.23916) < 1e-3
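# the four solvers accept compatible configs, so from_config can round-trip between them; DEIS should then
# reproduce the same result as before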
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.23916) < 1e-3
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
algorithm_type="deis",
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
assert not torch.isnan(sample).any(), "Samples have nan numbers"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.23916) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.091) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 315.3016) < 1e-2, f" expected result sum 315.3016, but get {result_sum}"
assert abs(result_mean.item() - 0.41054) < 1e-3, f" expected result mean 0.41054, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_lms.py
|
import torch
from diffusers import LMSDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class LMSDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (LMSDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for t in [0, 500, 800]:
self.check_over_forward(time_step=t)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1006.388) < 1e-2
assert abs(result_mean.item() - 1.31) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 0.0017) < 1e-2
assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1006.388) < 1e-2
assert abs(result_mean.item() - 1.31) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
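# Karras sigmas resample the noise levels onto the schedule from Karras et al. (2022), changing the
# sampling trajectory and hence the expected statistics below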
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 3812.9927) < 2e-2
assert abs(result_mean.item() - 4.9648) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
# add noise
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 27663.6895) < 1e-2
assert abs(result_mean.item() - 36.0204) < 1e-3
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim_parallel.py
|
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMParallelScheduler,)
forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
return sample
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_eta(self):
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=t, eta=eta)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
def test_batch_step_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
scheduler.set_timesteps(num_inference_steps)
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3], dim=0)
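# pair each of the first three timesteps with every sample in the batch, then flatten both grids so
# batch_step_no_noise denoises all combinations in a single call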
timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
result_sum = torch.sum(torch.abs(pred_prev_sample))
result_mean = torch.mean(torch.abs(pred_prev_sample))
assert abs(result_sum.item() - 1147.7904) < 1e-2
assert abs(result_mean.item() - 0.4982) < 1e-3
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 172.0067) < 1e-2
assert abs(result_mean.item() - 0.223967) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 149.8295) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 149.0784) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 354.5418) < 1e-2, f" expected result sum 354.5418, but get {result_sum}"
assert abs(result_mean.item() - 0.4616) < 1e-3, f" expected result mean 0.4616, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_sde.py
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils.testing_utils import require_torchsde, torch_device
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
scheduler_classes = (DPMSolverSDEScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1e-2
assert abs(result_mean.item() - 0.211619570851326) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1e-2
assert abs(result_mean.item() - 0.211619570851326) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py
|
import torch
from diffusers import KDPM2AncestralDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (KDPM2AncestralDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_full_loop_no_noise(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 13849.3877) < 1e-2
assert abs(result_mean.item() - 18.0331) < 5e-3
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_with_v_prediction(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
generator = torch.manual_seed(0)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 328.9970) < 1e-2
assert abs(result_mean.item() - 0.4284) < 1e-3
def test_full_loop_device(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 13849.3818) < 1e-1
assert abs(result_mean.item() - 18.0331) < 1e-3
def test_full_loop_with_noise(self):
if torch_device == "mps":
return
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
# add noise
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
noise = noise.to(sample.device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 93087.0312) < 1e-2, f" expected result sum 93087.0312, but get {result_sum}"
assert abs(result_mean.item() - 121.2071) < 5e-3, f" expected result mean 121.2071, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_lcm.py
|
import tempfile
from typing import Dict, List, Tuple
import torch
from diffusers import LCMScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class LCMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (LCMScheduler,)
forward_default_kwargs = (("num_inference_steps", 10),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.00085,
"beta_end": 0.0120,
"beta_schedule": "scaled_linear",
"prediction_type": "epsilon",
}
config.update(**kwargs)
return config
@property
def default_valid_timestep(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep = scheduler.timesteps[-1]
return timestep
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
# 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is
self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]:
self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample)
def test_thresholding(self):
self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
time_step=self.default_valid_timestep,
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
# Get default timestep schedule.
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
for t in timesteps:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
# Hardcoded for now
for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
# Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work
# for LCMScheduler under default settings
def test_add_noise_device(self, num_inference_steps=10):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
sample = self.dummy_sample.to(torch_device)
scaled_sample = scheduler.scale_model_input(sample, 0.0)
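# LCMScheduler.scale_model_input is an identity op on the sample, so only the shape contract is checked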
self.assertEqual(sample.shape, scaled_sample.shape)
noise = torch.randn_like(scaled_sample).to(torch_device)
t = scheduler.timesteps[5][None]
noised = scheduler.add_noise(scaled_sample, noise, t)
self.assertEqual(noised.shape, scaled_sample.shape)
# Override test_from_save_pretrained because it hardcodes a timestep of 1
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
timestep = self.default_valid_timestep
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
# Override test_step_shape because uses 0 and 1 as hardcoded timesteps
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
scheduler.set_timesteps(num_inference_steps)
timestep_0 = scheduler.timesteps[-2]
timestep_1 = scheduler.timesteps[-1]
output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
# Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
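# NaN is the only float that compares unequal to itself, so `t != t` masks exactly the NaN entries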
t[t != t] = 0
return t
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", 50)
timestep = self.default_valid_timestep
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)
scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple, outputs_dict)
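    # full_loop runs one complete denoising trajectory with the first scheduler
    # class and a deterministic dummy model; the regression tests below compare
    # hardcoded sums/means of the resulting sample against it.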
def full_loop(self, num_inference_steps=10, seed=0, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(seed)
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
return sample
def test_full_loop_onestep(self):
sample = self.full_loop(num_inference_steps=1)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
# TODO: get expected sum and mean
assert abs(result_sum.item() - 18.7097) < 1e-3
assert abs(result_mean.item() - 0.0244) < 1e-3
def test_full_loop_multistep(self):
sample = self.full_loop(num_inference_steps=10)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
# TODO: get expected sum and mean
assert abs(result_sum.item() - 197.7616) < 1e-3
assert abs(result_mean.item() - 0.2575) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps)
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps):
if i == len(timesteps) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep)
prev_t = prev_t.item()
self.assertEqual(prev_t, expected_prev_t)
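    # For the custom schedule above, previous_timestep maps each entry to its
    # successor in the list, e.g. 100 -> 87, 87 -> 50, ..., and the final 0 -> -1.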
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
scheduler.set_timesteps(timesteps=timesteps)
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_single.py
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DPMSolverSinglestepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf"),
"variance_type": None,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_full_uneven_loop(self):
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
num_inference_steps = 50
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2574) < 1e-3
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_switch(self):
        # make sure that switching between schedulers with the same config
        # gives the same results for defaults
scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2791) < 1e-3
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2791) < 1e-3
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
algorithm_type="dpmsolver++",
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
assert not torch.isnan(sample).any(), "Samples have nan numbers"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_lambda_min_clipped(self):
self.check_over_configs(lambda_min_clipped=-float("inf"))
self.check_over_configs(lambda_min_clipped=-5.1)
def test_variance_type(self):
self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type="learned_range")
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2791) < 1e-3
def test_full_loop_with_karras(self):
sample = self.full_loop(use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2248) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1453) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.0649) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[0]
time_step_1 = scheduler.timesteps[1]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 5
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 269.2187) < 1e-2, f"expected result sum 269.2187, but got {result_sum}"
        assert abs(result_mean.item() - 0.3505) < 1e-3, f"expected result mean 0.3505, but got {result_mean}"
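# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): a minimal standalone denoising loop mirroring full_loop()
# above. The 0.1 * sample residual stands in for a real denoising model; the
# constructor arguments match get_scheduler_config().
if __name__ == "__main__":
    demo_scheduler = DPMSolverSinglestepScheduler(
        num_train_timesteps=1000,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        solver_order=2,
        algorithm_type="dpmsolver++",
    )
    demo_sample = torch.rand(1, 3, 8, 8)  # toy sample in place of real latents
    demo_scheduler.set_timesteps(10)
    for demo_t in demo_scheduler.timesteps:
        demo_residual = 0.1 * demo_sample  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(demo_residual, demo_t, demo_sample).prev_sample
    print("mean(|sample|):", demo_sample.abs().mean().item())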
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_multi_inverse.py
import tempfile
import torch
from diffusers import DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler
from .test_schedulers import SchedulerCommonTest
class DPMSolverMultistepInverseSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DPMSolverMultistepInverseScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lower_order_final": False,
"lambda_min_clipped": -float("inf"),
"variance_type": None,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
algorithm_type="dpmsolver++",
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
assert not torch.isnan(sample).any(), "Samples have nan numbers"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_lambda_min_clipped(self):
self.check_over_configs(lambda_min_clipped=-float("inf"))
self.check_over_configs(lambda_min_clipped=-5.1)
def test_variance_type(self):
self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type="learned_range")
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.7047) < 1e-3
def test_full_loop_no_noise_thres(self):
sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 19.8933) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 1.5194) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 1.7833) < 2e-3
def test_switch(self):
        # make sure that switching between schedulers with the same config
        # gives the same results for defaults
scheduler = DPMSolverMultistepInverseScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.7047) < 1e-3
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepInverseScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
new_result_mean = torch.mean(torch.abs(sample))
assert abs(new_result_mean.item() - result_mean.item()) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_unique_timesteps(self, **config):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
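# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): the same toy loop as full_loop() above, but with the inverse
# scheduler, which is meant for inversion (re-noising) workflows rather than
# denoising.
if __name__ == "__main__":
    demo_scheduler = DPMSolverMultistepInverseScheduler(
        num_train_timesteps=1000,
        beta_schedule="linear",
        solver_order=2,
        algorithm_type="dpmsolver++",
    )
    demo_sample = torch.rand(1, 3, 8, 8)
    demo_scheduler.set_timesteps(10)
    for demo_t in demo_scheduler.timesteps:
        demo_residual = 0.1 * demo_sample  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(demo_residual, demo_t, demo_sample).prev_sample
    print("mean(|sample|):", demo_sample.abs().mean().item())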
hf_public_repos/diffusers/tests/schedulers/test_scheduler_euler_ancestral.py
import torch
from diffusers import EulerAncestralDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (EulerAncestralDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 152.3192) < 1e-2
assert abs(result_mean.item() - 0.1983) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 108.4439) < 1e-2
assert abs(result_mean.item() - 0.1412) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 152.3192) < 1e-2
assert abs(result_mean.item() - 0.1983) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
t_start = self.num_inference_steps - 2
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
# add noise
noise = self.dummy_noise_deter
noise = noise.to(sample.device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 56163.0508) < 1e-2, f"expected result sum 56163.0508, but got {result_sum}"
        assert abs(result_mean.item() - 73.1290) < 1e-3, f"expected result mean 73.1290, but got {result_mean}"
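# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): a minimal ancestral-sampling loop mirroring the full-loop
# tests above, including the scale_model_input() call and the generator that
# seeds the ancestral noise.
if __name__ == "__main__":
    demo_scheduler = EulerAncestralDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    demo_scheduler.set_timesteps(10)
    demo_generator = torch.manual_seed(0)
    demo_sample = torch.rand(1, 3, 8, 8) * demo_scheduler.init_noise_sigma
    for demo_t in demo_scheduler.timesteps:
        demo_input = demo_scheduler.scale_model_input(demo_sample, demo_t)
        demo_residual = 0.1 * demo_input  # stand-in for model(input, t)
        demo_sample = demo_scheduler.step(
            demo_residual, demo_t, demo_sample, generator=demo_generator
        ).prev_sample
    print("mean(|sample|):", demo_sample.abs().mean().item())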
hf_public_repos/diffusers/tests/schedulers/test_scheduler_vq_diffusion.py
import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler
from .test_schedulers import SchedulerCommonTest
class VQDiffusionSchedulerTest(SchedulerCommonTest):
scheduler_classes = (VQDiffusionScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_vec_classes": 4097,
"num_train_timesteps": 100,
}
config.update(**kwargs)
return config
def dummy_sample(self, num_vec_classes):
batch_size = 4
height = 8
width = 8
sample = torch.randint(0, num_vec_classes, (batch_size, height * width))
return sample
@property
def dummy_sample_deter(self):
assert False
def dummy_model(self, num_vec_classes):
def model(sample, t, *args):
batch_size, num_latent_pixels = sample.shape
logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels))
return_value = F.log_softmax(logits.double(), dim=1).float()
return return_value
return model
def test_timesteps(self):
for timesteps in [2, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_num_vec_classes(self):
for num_vec_classes in [5, 100, 1000, 4000]:
self.check_over_configs(num_vec_classes=num_vec_classes)
def test_time_indices(self):
for t in [0, 50, 99]:
self.check_over_forward(time_step=t)
def test_add_noise_device(self):
pass
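# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): one reverse trajectory over discrete latent codes, mirroring
# dummy_sample()/dummy_model() above. The random logits stand in for a real
# transformer's per-class predictions.
if __name__ == "__main__":
    demo_scheduler = VQDiffusionScheduler(num_vec_classes=4097, num_train_timesteps=100)
    demo_sample = torch.randint(0, 4097, (1, 64))  # latent pixel class indices
    demo_scheduler.set_timesteps(10)
    demo_generator = torch.manual_seed(0)
    for demo_t in demo_scheduler.timesteps:
        demo_logits = torch.rand(1, 4096, 64)  # num_vec_classes - 1 unmasked classes
        demo_log_p = F.log_softmax(demo_logits.double(), dim=1).float()
        demo_sample = demo_scheduler.step(
            demo_log_p, timestep=demo_t, sample=demo_sample, generator=demo_generator
        ).prev_sample
    print("final codes shape:", demo_sample.shape)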
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddpm_parallel.py
# Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDPMParallelScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_variance_type(self):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for t in [0, 500, 999]:
self.check_over_forward(time_step=t)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
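    # batch_step_no_noise advances several samples sitting at *different*
    # timesteps in a single call, without injecting fresh noise; this is the
    # primitive the parallel (ParaDiGMS) sampling path builds on.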
def test_batch_step_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3], dim=0)
timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
result_sum = torch.sum(torch.abs(pred_prev_sample))
result_mean = torch.mean(torch.abs(pred_prev_sample))
assert abs(result_sum.item() - 1153.1833) < 1e-2
assert abs(result_mean.item() - 0.5005) < 1e-3
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps)
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps):
if i == len(timesteps) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep)
prev_t = prev_t.item()
self.assertEqual(prev_t, expected_prev_t)
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
scheduler.set_timesteps(timesteps=timesteps)
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
t_start = num_trained_timesteps - 2
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 387.9466) < 1e-2, f"expected result sum 387.9466, but got {result_sum}"
        assert abs(result_mean.item() - 0.5051) < 1e-3, f"expected result mean 0.5051, but got {result_mean}"
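# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): stepping three samples that sit at three different timesteps
# in one batched call, mirroring test_batch_step_no_noise() above. The
# 0.1 * samples residual stands in for a real model.
if __name__ == "__main__":
    demo_scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
    demo_samples = torch.rand(3, 3, 8, 8)
    demo_timesteps = torch.tensor([2, 1, 0])  # one timestep per sample
    demo_residual = 0.1 * demo_samples  # stand-in for model(samples, timesteps)
    demo_prev = demo_scheduler.batch_step_no_noise(demo_residual, demo_timesteps, demo_samples)
    print("batched prev sample shape:", demo_prev.shape)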
hf_public_repos/diffusers/tests/schedulers/test_scheduler_pndm.py
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (PNDMScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.ets = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.ets = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.prk_timesteps):
residual = model(sample, t)
sample = scheduler.step_prk(residual, t, sample).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
residual = model(sample, t)
sample = scheduler.step_plms(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
scheduler.ets = dummy_past_residuals[:]
output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps,
torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
),
)
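        # Note: the repeated entries (851, 851, 801, 801, ...) are expected; the
        # PRK (Runge-Kutta) warmup phase revisits timesteps before PLMS takes over.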
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for t in [1, 5, 10]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
self.check_over_forward(num_inference_steps=num_inference_steps)
def test_pow_of_3_inference_steps(self):
        # an earlier version of set_timesteps() raised an indexing error into the alphas when the number of inference steps was a power of 3
num_inference_steps = 27
for scheduler_class in self.scheduler_classes:
sample = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
            # before the power-of-3 fix this would error on the first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
sample = scheduler.step_prk(residual, t, sample).prev_sample
def test_inference_plms_no_past_residuals(self):
with self.assertRaises(ValueError):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
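# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): PNDM's two-phase loop from full_loop() above; Runge-Kutta
# (PRK) warmup steps first, then linear multistep (PLMS) steps. The
# 0.1 * sample residual stands in for a real model.
if __name__ == "__main__":
    demo_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    demo_sample = torch.rand(1, 3, 8, 8)
    demo_scheduler.set_timesteps(10)
    for demo_t in demo_scheduler.prk_timesteps:
        demo_sample = demo_scheduler.step_prk(0.1 * demo_sample, demo_t, demo_sample).prev_sample
    for demo_t in demo_scheduler.plms_timesteps:
        demo_sample = demo_scheduler.step_plms(0.1 * demo_sample, demo_t, demo_sample).prev_sample
    print("mean(|sample|):", demo_sample.abs().mean().item())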
hf_public_repos/diffusers/tests/schedulers/test_scheduler_score_sde_ve.py
import tempfile
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVeScheduler
class ScoreSdeVeSchedulerTest(unittest.TestCase):
# TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration)
scheduler_classes = (ScoreSdeVeScheduler,)
forward_default_kwargs = ()
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
sample = torch.rand((batch_size, num_channels, height, width))
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
def dummy_model(self):
def model(sample, t, *args):
return sample * t / (t + 1)
return model
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 2000,
"snr": 0.15,
"sigma_min": 0.01,
"sigma_max": 1348,
"sampling_eps": 1e-5,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
for scheduler_class in self.scheduler_classes:
sample = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
output = scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
new_output = new_scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
new_output = new_scheduler.step_correct(
residual, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
for scheduler_class in self.scheduler_classes:
sample = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
output = scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
new_output = new_scheduler.step_pred(
residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
new_output = new_scheduler.step_correct(
residual, sample, generator=torch.manual_seed(0), **kwargs
).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical"
def test_timesteps(self):
for timesteps in [10, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_sigmas(self):
for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]):
self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max)
def test_time_indices(self):
for t in [0.1, 0.5, 0.75]:
self.check_over_forward(time_step=t)
def test_full_loop_no_noise(self):
kwargs = dict(self.forward_default_kwargs)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 3
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_sigmas(num_inference_steps)
scheduler.set_timesteps(num_inference_steps)
generator = torch.manual_seed(0)
for i, t in enumerate(scheduler.timesteps):
sigma_t = scheduler.sigmas[i]
for _ in range(scheduler.config.correct_steps):
with torch.no_grad():
model_output = model(sample, sigma_t)
sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample
with torch.no_grad():
model_output = model(sample, sigma_t)
output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs)
sample, _ = output.prev_sample, output.prev_sample_mean
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert np.isclose(result_sum.item(), 14372758528.0)
assert np.isclose(result_mean.item(), 18714530.0)
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
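# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream test suite; demo_* names are
# illustrative): the predictor-corrector loop from test_full_loop_no_noise()
# above, with the same toy residual as dummy_model() in place of a real score
# model.
if __name__ == "__main__":
    demo_scheduler = ScoreSdeVeScheduler(
        num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348, sampling_eps=1e-5
    )
    demo_sample = torch.rand(1, 3, 8, 8)
    demo_generator = torch.manual_seed(0)
    demo_scheduler.set_sigmas(3)
    demo_scheduler.set_timesteps(3)
    for demo_i, demo_t in enumerate(demo_scheduler.timesteps):
        demo_sigma = demo_scheduler.sigmas[demo_i]
        for _ in range(demo_scheduler.config.correct_steps):  # corrector
            demo_residual = demo_sample * demo_sigma / (demo_sigma + 1)  # mirrors dummy_model()
            demo_sample = demo_scheduler.step_correct(
                demo_residual, demo_sample, generator=demo_generator
            ).prev_sample
        demo_residual = demo_sample * demo_sigma / (demo_sigma + 1)
        demo_sample = demo_scheduler.step_pred(  # predictor
            demo_residual, demo_t, demo_sample, generator=demo_generator
        ).prev_sample
    print("mean(|sample|):", demo_sample.abs().mean().item())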
hf_public_repos/diffusers/tests/schedulers/test_schedulers.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
import uuid
from typing import Dict, List, Tuple
import numpy as np
import torch
from huggingface_hub import delete_repo
import diffusers
from diffusers import (
CMStochasticIterativeScheduler,
DDIMScheduler,
DEISMultistepScheduler,
DiffusionPipeline,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
IPNDMScheduler,
LMSDiscreteScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
logging,
)
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils.testing_utils import CaptureLogger, torch_device
from ..others.test_utils import TOKEN, USER, is_staging_test
torch.backends.cuda.matmul.allow_tf32 = False
class SchedulerObject(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
e=[1, 3],
):
pass
class SchedulerObject2(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
f=[1, 3],
):
pass
class SchedulerObject3(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
e=[1, 3],
f=[1, 3],
):
pass
class SchedulerBaseTests(unittest.TestCase):
def test_save_load_from_different_config(self):
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
logger = logging.get_logger("diffusers.configuration_utils")
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
with CaptureLogger(logger) as cap_logger_1:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_1 = SchedulerObject2.from_config(config)
# now save a config parameter that is not expected
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f:
data = json.load(f)
data["unexpected"] = True
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f:
json.dump(data, f)
with CaptureLogger(logger) as cap_logger_2:
config = SchedulerObject.load_config(tmpdirname)
new_obj_2 = SchedulerObject.from_config(config)
with CaptureLogger(logger) as cap_logger_3:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_3 = SchedulerObject2.from_config(config)
assert new_obj_1.__class__ == SchedulerObject2
assert new_obj_2.__class__ == SchedulerObject
assert new_obj_3.__class__ == SchedulerObject2
assert cap_logger_1.out == ""
assert (
cap_logger_2.out
== "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and"
" will"
" be ignored. Please verify your config.json configuration file.\n"
)
assert cap_logger_2.out.replace("SchedulerObject", "SchedulerObject2") == cap_logger_3.out
def test_save_load_compatible_schedulers(self):
SchedulerObject2._compatibles = ["SchedulerObject"]
SchedulerObject._compatibles = ["SchedulerObject2"]
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
setattr(diffusers, "SchedulerObject2", SchedulerObject2)
logger = logging.get_logger("diffusers.configuration_utils")
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
# now save a config parameter that is expected by another class, but not origin class
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f:
data = json.load(f)
data["f"] = [0, 0]
data["unexpected"] = True
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f:
json.dump(data, f)
with CaptureLogger(logger) as cap_logger:
config = SchedulerObject.load_config(tmpdirname)
new_obj = SchedulerObject.from_config(config)
assert new_obj.__class__ == SchedulerObject
assert (
cap_logger.out
== "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and"
" will"
" be ignored. Please verify your config.json configuration file.\n"
)
def test_save_load_from_different_config_comp_schedulers(self):
SchedulerObject3._compatibles = ["SchedulerObject", "SchedulerObject2"]
SchedulerObject2._compatibles = ["SchedulerObject", "SchedulerObject3"]
SchedulerObject._compatibles = ["SchedulerObject2", "SchedulerObject3"]
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
setattr(diffusers, "SchedulerObject2", SchedulerObject2)
setattr(diffusers, "SchedulerObject3", SchedulerObject3)
logger = logging.get_logger("diffusers.configuration_utils")
logger.setLevel(diffusers.logging.INFO)
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
with CaptureLogger(logger) as cap_logger_1:
config = SchedulerObject.load_config(tmpdirname)
new_obj_1 = SchedulerObject.from_config(config)
with CaptureLogger(logger) as cap_logger_2:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_2 = SchedulerObject2.from_config(config)
with CaptureLogger(logger) as cap_logger_3:
config = SchedulerObject3.load_config(tmpdirname)
new_obj_3 = SchedulerObject3.from_config(config)
assert new_obj_1.__class__ == SchedulerObject
assert new_obj_2.__class__ == SchedulerObject2
assert new_obj_3.__class__ == SchedulerObject3
assert cap_logger_1.out == ""
assert cap_logger_2.out == "{'f'} was not found in config. Values will be initialized to default values.\n"
assert cap_logger_3.out == "{'f'} was not found in config. Values will be initialized to default values.\n"
def test_default_arguments_not_in_config(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16
)
assert pipe.scheduler.__class__ == DDIMScheduler
# Default for DDIMScheduler
assert pipe.scheduler.config.timestep_spacing == "leading"
# Switch to a different one, verify we use the default for that class
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "linspace"
# Override with kwargs
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
assert pipe.scheduler.config.timestep_spacing == "trailing"
# Verify overridden kwargs stick
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "trailing"
# And stick
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "trailing"
def test_default_solver_type_after_switch(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16
)
assert pipe.scheduler.__class__ == DDIMScheduler
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.solver_type == "logrho"
# Switch to UniPC, verify the solver is the default
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.solver_type == "bh2"
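# Editorial note: the two tests above pin down the scheduler-swap idiom. `from_config(other.config)`
# keeps shared config values, falls back to the new class's defaults for keys the old config never
# set explicitly, and lets kwargs such as `timestep_spacing="trailing"` override both.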
class SchedulerCommonTest(unittest.TestCase):
scheduler_classes = ()
forward_default_kwargs = ()
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
sample = torch.rand((batch_size, num_channels, height, width))
return sample
@property
def dummy_noise_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
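        # descending ramp, so this deterministic "noise" differs from the ascending dummy_sample_deter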
sample = torch.arange(num_elems).flip(-1)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
def get_scheduler_config(self):
raise NotImplementedError
def dummy_model(self):
def model(sample, t, *args):
# if t is a tensor, match the number of dimensions of sample
if isinstance(t, torch.Tensor):
num_dims = len(sample.shape)
# pad t with 1s to match num_dims
t = t.reshape(-1, *(1,) * (num_dims - 1)).to(sample.device).to(sample.dtype)
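            # toy denoiser: deterministic, shape-preserving, output scaled by t / (t + 1)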
return sample * t / (t + 1)
return model
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
            # TODO(Suraj) - delete the following two lines once DDPM, DDIM, and PNDM have timesteps cast to float by default
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
time_step = float(time_step)
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
time_step = scaled_sigma_max
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, time_step)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# Make sure `scale_model_input` is invoked to prevent a warning
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
_ = scheduler.scale_model_input(sample, scaled_sigma_max)
_ = new_scheduler.scale_model_input(sample, scaled_sigma_max)
elif scheduler_class != VQDiffusionScheduler:
_ = scheduler.scale_model_input(sample, 0)
_ = new_scheduler.scale_model_input(sample, 0)
# Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
time_step = float(time_step)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, time_step)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
timestep = 1
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep = float(timestep)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
timestep = scheduler.sigma_to_t(scheduler.config.sigma_max)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_compatibles(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert all(c is not None for c in scheduler.compatibles)
for comp_scheduler_cls in scheduler.compatibles:
comp_scheduler = comp_scheduler_cls.from_config(scheduler.config)
assert comp_scheduler is not None
new_scheduler = scheduler_class.from_config(comp_scheduler.config)
new_scheduler_config = {k: v for k, v in new_scheduler.config.items() if k in scheduler.config}
scheduler_diff = {k: v for k, v in new_scheduler.config.items() if k not in scheduler.config}
# make sure that configs are essentially identical
assert new_scheduler_config == dict(scheduler.config)
# make sure that only differences are for configs that are not in init
init_keys = inspect.signature(scheduler_class.__init__).parameters.keys()
assert set(scheduler_diff.keys()).intersection(set(init_keys)) == set()
def test_from_pretrained(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_pretrained(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# `_use_default_values` should not exist for just saved & loaded scheduler
scheduler_config = dict(scheduler.config)
del scheduler_config["_use_default_values"]
assert scheduler_config == new_scheduler.config
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
timestep_0 = 1
timestep_1 = 0
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep_0 = float(timestep_0)
timestep_1 = float(timestep_1)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep_0)
else:
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
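            # NaN is the only value not equal to itself, so `t != t` selects exactly the NaNs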
t[t != t] = 0
return t
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", 50)
timestep = 0
if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler:
timestep = 1
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep = float(timestep)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
timestep = scheduler.sigma_to_t(scheduler.config.sigma_max)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep)
else:
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
            # Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
            # Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple, outputs_dict)
def test_scheduler_public_api(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class != VQDiffusionScheduler:
self.assertTrue(
hasattr(scheduler, "init_noise_sigma"),
f"{scheduler_class} does not implement a required attribute `init_noise_sigma`",
)
self.assertTrue(
hasattr(scheduler, "scale_model_input"),
(
f"{scheduler_class} does not implement a required class method `scale_model_input(sample,"
" timestep)`"
),
)
self.assertTrue(
hasattr(scheduler, "step"),
f"{scheduler_class} does not implement a required class method `step(...)`",
)
if scheduler_class != VQDiffusionScheduler:
sample = self.dummy_sample
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max)
else:
scaled_sample = scheduler.scale_model_input(sample, 0.0)
self.assertEqual(sample.shape, scaled_sample.shape)
def test_add_noise_device(self):
for scheduler_class in self.scheduler_classes:
if scheduler_class == IPNDMScheduler:
continue
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(100)
sample = self.dummy_sample.to(torch_device)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max)
else:
scaled_sample = scheduler.scale_model_input(sample, 0.0)
self.assertEqual(sample.shape, scaled_sample.shape)
noise = torch.randn_like(scaled_sample).to(torch_device)
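            # take a single timestep but keep a leading batch dimension for add_noise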
t = scheduler.timesteps[5][None]
noised = scheduler.add_noise(scaled_sample, noise, t)
self.assertEqual(noised.shape, scaled_sample.shape)
def test_deprecated_kwargs(self):
for scheduler_class in self.scheduler_classes:
has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters
has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0
if has_kwarg_in_model_class and not has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if"
" there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
" [<deprecated_argument>]`"
)
if not has_kwarg_in_model_class and has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`"
f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the"
" deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`"
)
def test_trained_betas(self):
for scheduler_class in self.scheduler_classes:
if scheduler_class in (VQDiffusionScheduler, CMStochasticIterativeScheduler):
continue
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, trained_betas=np.array([0.1, 0.3]))
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_pretrained(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
assert scheduler.betas.tolist() == new_scheduler.betas.tolist()
def test_getattr_is_correct(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
# save some things to test
scheduler.dummy_attribute = 5
scheduler.register_to_config(test_attribute=5)
logger = logging.get_logger("diffusers.configuration_utils")
# 30 for warning
logger.setLevel(30)
with CaptureLogger(logger) as cap_logger:
assert hasattr(scheduler, "dummy_attribute")
assert getattr(scheduler, "dummy_attribute") == 5
assert scheduler.dummy_attribute == 5
# no warning should be thrown
assert cap_logger.out == ""
logger = logging.get_logger("diffusers.schedulers.schedulering_utils")
# 30 for warning
logger.setLevel(30)
with CaptureLogger(logger) as cap_logger:
assert hasattr(scheduler, "save_pretrained")
fn = scheduler.save_pretrained
fn_1 = getattr(scheduler, "save_pretrained")
assert fn == fn_1
# no warning should be thrown
assert cap_logger.out == ""
# warning should be thrown
with self.assertWarns(FutureWarning):
assert scheduler.test_attribute == 5
with self.assertWarns(FutureWarning):
assert getattr(scheduler, "test_attribute") == 5
with self.assertRaises(AttributeError) as error:
scheduler.does_not_exist
assert str(error.exception) == f"'{type(scheduler).__name__}' object has no attribute 'does_not_exist'"
@is_staging_test
class SchedulerPushToHubTester(unittest.TestCase):
identifier = uuid.uuid4()
repo_id = f"test-scheduler-{identifier}"
org_repo_id = f"valid_org/{repo_id}-org"
def test_push_to_hub(self):
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
scheduler.push_to_hub(self.repo_id, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}")
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.repo_id)
# Push to hub via save_config
with tempfile.TemporaryDirectory() as tmp_dir:
scheduler.save_config(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}")
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.repo_id)
def test_push_to_hub_in_organization(self):
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
scheduler.push_to_hub(self.org_repo_id, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.org_repo_id)
# Push to hub via save_config
with tempfile.TemporaryDirectory() as tmp_dir:
scheduler.save_config(tmp_dir, repo_id=self.org_repo_id, push_to_hub=True, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.org_repo_id)
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddpm.py
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDPMScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_variance_type(self):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for t in [0, 500, 999]:
self.check_over_forward(time_step=t)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps)
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps):
if i == len(timesteps) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep)
prev_t = prev_t.item()
self.assertEqual(prev_t, expected_prev_t)
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
):
scheduler.set_timesteps(timesteps=timesteps)
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
t_start = num_trained_timesteps - 2
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
# add noise
noise = self.dummy_noise_deter
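        # drop the first t_start steps; scheduler.order is 1 for DDPM but kept for higher-order schedulers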
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but got {result_sum}"
        assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but got {result_mean}"
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ipndm.py
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (IPNDMScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {"num_train_timesteps": 1000}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
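        # reset the internal step counter so the timestep loop can run a second time from scratch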
scheduler._step_index = None
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
scheduler.ets = dummy_past_residuals[:]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 2540529) < 10
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_dpm_multi.py
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverMultistepSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DPMSolverMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lower_order_final": False,
"euler_at_final": False,
"lambda_min_clipped": -float("inf"),
"variance_type": None,
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
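                # step solver_order + 1 times so the multistep history in model_outputs is exercised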
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = new_scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
time_step = new_scheduler.timesteps[time_step]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
algorithm_type="dpmsolver++",
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
if algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]:
if order == 3:
continue
else:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
algorithm_type=algorithm_type,
)
                            assert not torch.isnan(sample).any(), "Samples contain NaNs"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_euler_at_final(self):
self.check_over_configs(euler_at_final=True)
self.check_over_configs(euler_at_final=False)
def test_lambda_min_clipped(self):
self.check_over_configs(lambda_min_clipped=-float("inf"))
self.check_over_configs(lambda_min_clipped=-5.1)
def test_variance_type(self):
self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type="learned_range")
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.3301) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 5
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 318.4111) < 1e-2, f" expected result sum 318.4111, but got {result_sum}"
        assert abs(result_mean.item() - 0.4146) < 1e-3, f" expected result mean 0.4146, but got {result_mean}"
def test_full_loop_no_noise_thres(self):
sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 1.1364) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2251) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2096) < 1e-3
def test_full_loop_with_lu_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_lu_lambdas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1554) < 1e-3
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.3301) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.3301) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_duplicated_timesteps(self, **config):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps) == scheduler.num_inference_steps
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim_inverse.py
import torch
from diffusers import DDIMInverseScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMInverseSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMInverseScheduler,)
forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
return sample
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_add_noise_device(self):
pass
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 671.6816) < 1e-2
assert abs(result_mean.item() - 0.8746) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1394.2185) < 1e-2
assert abs(result_mean.item() - 1.8154) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 539.9622) < 1e-2
assert abs(result_mean.item() - 0.7031) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 542.6722) < 1e-2
assert abs(result_mean.item() - 0.7066) < 1e-3
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_ddim.py
import torch
from diffusers import DDIMScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMScheduler,)
forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
return sample
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_eta(self):
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=t, eta=eta)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 172.0067) < 1e-2
assert abs(result_mean.item() - 0.223967) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 149.8295) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 149.0784) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 354.5418) < 1e-2, f" expected result sum 354.5418, but got {result_sum}"
        assert abs(result_mean.item() - 0.4616) < 1e-3, f" expected result mean 0.4616, but got {result_mean}"
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/schedulers/test_scheduler_flax.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import tempfile
import unittest
from typing import Dict, List, Tuple
from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from jax import random
jax_device = jax.default_backend()
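# results differ slightly between TPU and CPU/GPU backends, so several tests below branch on jax_device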
@require_flax
class FlaxSchedulerCommonTest(unittest.TestCase):
scheduler_classes = ()
forward_default_kwargs = ()
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
key1, key2 = random.split(random.PRNGKey(0))
sample = random.uniform(key1, (batch_size, num_channels, height, width))
return sample, key2
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
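# evenly spaced values in [0, 1) make the sample fully deterministic; the transpose below moves the batch axis first, giving (batch, channels, height, width)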
sample = jnp.arange(num_elems)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
return jnp.transpose(sample, (3, 0, 1, 2))
def get_scheduler_config(self):
raise NotImplementedError
def dummy_model(self):
def model(sample, t, *args):
return sample * t / (t + 1)
return model
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, key = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, key = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, key = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, key = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample
output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
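# t != t is only true for NaN entries; .at[...].set(...) is JAX's out-of-place (functional) update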
return t.at[t != t].set(0)
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, key = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs)
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
def test_deprecated_kwargs(self):
for scheduler_class in self.scheduler_classes:
has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters
has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0
if has_kwarg_in_model_class and not has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if"
" there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
" [<deprecated_argument>]`"
)
if not has_kwarg_in_model_class and has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`"
f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the"
" deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`"
)
@require_flax
class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest):
scheduler_classes = (FlaxDDPMScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_variance_type(self):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_time_indices(self):
for t in [0, 500, 999]:
self.check_over_forward(time_step=t)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
key1, key2 = random.split(random.PRNGKey(0))
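# walk the full training schedule backwards, splitting the PRNG key each step so every step sees fresh noise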
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
output = scheduler.step(state, residual, t, sample, key1)
pred_prev_sample = output.prev_sample
state = output.state
key1, key2 = random.split(key2)
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
assert abs(result_sum - 255.0714) < 1e-2
assert abs(result_mean - 0.332124) < 1e-3
else:
assert abs(result_sum - 255.1113) < 1e-2
assert abs(result_mean - 0.332176) < 1e-3
@require_flax
class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest):
scheduler_classes = (FlaxDDIMScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
key1, key2 = random.split(random.PRNGKey(0))
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
state = scheduler.set_timesteps(state, num_inference_steps)
for t in state.timesteps:
residual = model(sample, t)
output = scheduler.step(state, residual, t, sample)
sample = output.prev_sample
state = output.state
key1, key2 = random.split(key2)
return sample
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, _ = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, _ = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
sample, _ = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
return t.at[t != t].set(0)
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, _ = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, _ = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample
output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, 5)
assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all()
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_time_indices(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
assert abs(result_sum - 172.0067) < 1e-2
assert abs(result_mean - 0.223967) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
assert abs(result_sum - 149.8409) < 1e-2
assert abs(result_mean - 0.1951) < 1e-3
else:
assert abs(result_sum - 149.8295) < 1e-2
assert abs(result_mean - 0.1951) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
pass
# FIXME: both result_sum and result_mean are nan on TPU
# assert jnp.isnan(result_sum)
# assert jnp.isnan(result_mean)
else:
assert abs(result_sum - 149.0784) < 1e-2
assert abs(result_mean - 0.1941) < 1e-3
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
@require_flax
class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest):
scheduler_classes = (FlaxPNDMScheduler,)
forward_default_kwargs = (("num_inference_steps", 50),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample, _ = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
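# flax schedulers are stateless; mutable quantities such as the timesteps and past residuals (ets) live in the state object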
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
# copy over dummy past residuals
state = state.replace(ets=dummy_past_residuals[:])
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
# copy over dummy past residuals
new_state = new_state.replace(ets=dummy_past_residuals[:])
(prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
(new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical"
output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
pass
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
return t.at[t != t].set(0)
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, _ = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample, _ = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
# copy over dummy past residuals (must be after setting timesteps)
state = state.replace(ets=dummy_past_residuals[:])
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
# copy over dummy past residuals (must be after setting timesteps)
new_state = new_state.replace(ets=dummy_past_residuals[:])
output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
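# PNDM warms up with Runge-Kutta (PRK) steps, then switches to the linear multistep (PLMS) phase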
for i, t in enumerate(state.prk_timesteps):
residual = model(sample, t)
sample, state = scheduler.step_prk(state, residual, t, sample)
for i, t in enumerate(state.plms_timesteps):
residual = model(sample, t)
sample, state = scheduler.step_plms(state, residual, t, sample)
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, _ = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
state = state.replace(ets=dummy_past_residuals[:])
output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs)
output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs)
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs)
output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs)
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, 10, shape=())
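# the PRK warm-up repeats timesteps in pairs, and steps_offset=1 shifts the whole schedule by one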
assert jnp.equal(
state.timesteps,
jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
).all()
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_time_indices(self):
for t in [1, 5, 10]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_pow_of_3_inference_steps(self):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
num_inference_steps = 27
for scheduler_class in self.scheduler_classes:
sample, _ = self.dummy_sample
residual = 0.1 * sample
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(state.prk_timesteps[:2]):
sample, state = scheduler.step_prk(state, residual, t, sample)
def test_inference_plms_no_past_residuals(self):
with self.assertRaises(ValueError):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
state = scheduler.create_state()
sample, _ = self.dummy_sample
# stepping without the PRK warm-up residuals (ets) must raise a ValueError
scheduler.step_plms(state, 0.1 * sample, 1, sample)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
assert abs(result_sum - 198.1275) < 1e-2
assert abs(result_mean - 0.2580) < 1e-3
else:
assert abs(result_sum - 198.1318) < 1e-2
assert abs(result_mean - 0.2580) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
assert abs(result_sum - 186.83226) < 1e-2
assert abs(result_mean - 0.24327) < 1e-3
else:
assert abs(result_sum - 186.9466) < 1e-2
assert abs(result_mean - 0.24342) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = jnp.sum(jnp.abs(sample))
result_mean = jnp.mean(jnp.abs(sample))
if jax_device == "tpu":
assert abs(result_sum - 186.83226) < 1e-2
assert abs(result_mean - 0.24327) < 1e-3
else:
assert abs(result_sum - 186.9482) < 1e-2
assert abs(result_mean - 0.2434) < 1e-3
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_consistency_model.py
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
scheduler_classes = (CMStochasticIterativeScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**kwargs)
return config
# Override test_step_shape to add CMStochasticIterativeScheduler-specific logic regarding timesteps
# Problem is that we don't know two timesteps that will always be in the timestep schedule from only the scheduler
# config; scaled sigma_max is always in the timestep schedule, but sigma_min is in the sigma schedule while scaled
# sigma_min is not in the timestep schedule
def test_step_shape(self):
num_inference_steps = 10
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep_0 = scheduler.timesteps[0]
timestep_1 = scheduler.timesteps[1]
sample = self.dummy_sample
residual = 0.1 * sample
output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_clip_denoised(self):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=clip_denoised)
def test_full_loop_no_noise_onestep(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 1
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
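# consistency-model sampling starts from pure noise scaled by the scheduler's init_noise_sigma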
for i, t in enumerate(timesteps):
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.2510) < 1e-3
def test_full_loop_no_noise_multistep(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [106, 0]
scheduler.set_timesteps(timesteps=timesteps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.4527) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 763.9186) < 1e-2, f" expected result sum 763.9186, but get {result_sum}"
assert abs(result_mean.item() - 0.9947) < 1e-3, f" expected result mean 0.9947, but get {result_mean}"
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [39, 30, 12, 15, 0]
with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [39, 30, 12, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
):
scheduler.set_timesteps(timesteps=timesteps)
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_euler.py
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (EulerDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 10.0807) < 1e-2
assert abs(result_mean.item() - 0.0131) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 0.0002) < 1e-2
assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 10.0807) < 1e-2
assert abs(result_mean.item() - 0.0131) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 124.52299499511719) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
# add noise
t_start = self.num_inference_steps - 2
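# restart two steps from the end of the schedule to exercise resuming from a partially denoised sample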
noise = self.dummy_noise_deter
noise = noise.to(sample.device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 57062.9297) < 1e-2, f" expected result sum 57062.9297, but get {result_sum}"
assert abs(result_mean.item() - 74.3007) < 1e-3, f" expected result mean 74.3007, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_heun.py
|
import torch
from diffusers import HeunDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class HeunDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (HeunDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear", "exp"]:
self.check_over_configs(beta_schedule=schedule)
def test_clip_sample(self):
for clip_sample_range in [1.0, 2.0, 3.0]:
self.check_over_configs(clip_sample_range=clip_sample_range, clip_sample=True)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction", "sample"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07) < 1e-2
assert abs(result_mean.item() - 6.1112e-10) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if str(torch_device).startswith("cpu"):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
elif str(torch_device).startswith("mps"):
# Larger tolerance on mps
assert abs(result_mean.item() - 0.0002) < 1e-2
else:
# CUDA
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 0.00015) < 1e-2
assert abs(result_mean.item() - 1.9869554535034695e-07) < 1e-2
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
noise = noise.to(torch_device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 75074.8906) < 1e-2, f" expected result sum 75074.8906, but get {result_sum}"
assert abs(result_mean.item() - 97.7538) < 1e-3, f" expected result mean 97.7538, but get {result_mean}"
| 0
|
hf_public_repos/diffusers/tests
|
hf_public_repos/diffusers/tests/schedulers/test_scheduler_unipc.py
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
scheduler_classes = (UniPCMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step, time_step + scheduler.config.solver_order + 1):
t = scheduler.timesteps[t]
output = scheduler.step(residual, t, output, **kwargs).prev_sample
new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, scheduler=None, **config):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
def test_timesteps(self):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
solver_order=order,
solver_type=solver_type,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_solver_order_and_type(self):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
)
sample = self.full_loop(
solver_order=order,
solver_type=solver_type,
prediction_type=prediction_type,
)
                    assert not torch.isnan(sample).any(), "Samples have NaN values"
def test_lower_order_final(self):
self.check_over_configs(lower_order_final=True)
self.check_over_configs(lower_order_final=False)
def test_inference_steps(self):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2464) < 1e-3
def test_full_loop_with_karras(self):
sample = self.full_loop(use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2925) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1014) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1966) < 1e-3
def test_fp16_support(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
assert sample.dtype == torch.float16
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 315.5757) < 1e-2, f"expected result sum 315.5757, but got {result_sum}"
        assert abs(result_mean.item() - 0.4109) < 1e-3, f"expected result mean 0.4109, but got {result_mean}"
class UniPCMultistepScheduler1DTest(UniPCMultistepSchedulerTest):
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
width = 8
sample = torch.rand((batch_size, num_channels, width))
return sample
@property
def dummy_noise_deter(self):
batch_size = 4
num_channels = 3
width = 8
num_elems = batch_size * num_channels * width
sample = torch.arange(num_elems).flip(-1)
sample = sample.reshape(num_channels, width, batch_size)
sample = sample / num_elems
sample = sample.permute(2, 0, 1)
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
width = 8
num_elems = batch_size * num_channels * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, width, batch_size)
sample = sample / num_elems
sample = sample.permute(2, 0, 1)
return sample
def test_switch(self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
scheduler = DEISMultistepScheduler.from_config(scheduler.config)
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
sample = self.full_loop(scheduler=scheduler)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2441) < 1e-3
def test_full_loop_with_karras(self):
sample = self.full_loop(use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.2898) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1014) < 1e-3
def test_full_loop_with_karras_and_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 0.1944) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 39.0870) < 1e-2, f"expected result sum 39.0870, but got {result_sum}"
        assert abs(result_mean.item() - 0.4072) < 1e-3, f"expected result mean 0.4072, but got {result_mean}"
| 0
|
hf_public_repos/diffusers/tests/fixtures
|
hf_public_repos/diffusers/tests/fixtures/custom_pipeline/pipeline.py
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
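# A minimal usage sketch (illustrative only; `unet` and `scheduler` stand in for any
# compatible `UNet2DModel` and `DDPMScheduler`/`DDIMScheduler` instances):
#
#   pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   output, message = pipeline(batch_size=1, num_inference_steps=10)
#   image = output.images[0]  # `message` is the extra "This is a local test" string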
| 0
|
hf_public_repos/diffusers/tests/fixtures
|
hf_public_repos/diffusers/tests/fixtures/custom_pipeline/what_ever.py
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
| 0
|
hf_public_repos/diffusers
|
hf_public_repos/diffusers/docs/README.md
|
<!---
Copyright 2023- The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Generating the documentation
To generate the documentation, you first have to build it. Several packages are necessary to build the docs;
you can install them with the following command, at the root of the code repository:
```bash
pip install -e ".[docs]"
```
Then you need to install our open source documentation builder tool:
```bash
pip install git+https://github.com/huggingface/doc-builder
```
---
**NOTE**
You only need to generate the documentation to inspect it locally (for instance, if you're planning changes and want to
check how they look before committing). You don't have to commit the built documentation.
---
## Previewing the documentation
To preview the docs, first install the `watchdog` module with:
```bash
pip install watchdog
```
Then run the following command:
```bash
doc-builder preview {package_name} {path_to_docs}
```
For example:
```bash
doc-builder preview diffusers docs/source/en
```
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
---
**NOTE**
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again).
---
## Adding a new element to the navigation bar
Accepted files are Markdown (.md).
Create the file with the `.md` extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml) file.
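For instance, linking a hypothetical `my_new_page.md` (the filename and titles below are made up for illustration) would look like:
```yaml
- sections:
  - local: my_new_page # filename without the .md extension
    title: My New Page # title shown in the navigation bar
  title: My Section
```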
## Renaming section headers and moving sections
It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and social media, and it makes for a far better user experience if users reading them months later can still easily navigate to the originally intended information.
Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
```md
Sections that were moved:
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course, if you moved it to another file, then:
```md
Sections that were moved:
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```
Use the relative style to link to the new file so that the versioned docs continue to work.
For an example of a rich moved section set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
## Writing Documentation - Specification
The `huggingface/diffusers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.
### Adding a new tutorial
Adding a new tutorial or section is done in two steps:
- Add a new Markdown (.md) file under `docs/source/<languageCode>`.
- Link that file in `docs/source/<languageCode>/_toctree.yml` on the correct toc-tree.
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four.
### Adding a new pipeline/scheduler
When adding a new pipeline:
- Create a file `xxx.md` under `docs/source/<languageCode>/api/pipelines` (don't hesitate to copy an existing file as template).
- Link that file in the (*Diffusers Summary*) section in `docs/source/api/pipelines/overview.md`, along with a link to the paper and a colab notebook (if available).
- Write a short overview of the diffusion model:
- Overview with paper & authors
- Paper abstract
- Tips and tricks and how to use it best
  - Possibly an end-to-end example of how to use it
- Add all the pipeline classes that should be linked in the diffusion model. These classes should be added using our Markdown syntax. By default as follows:
```
[[autodoc]] XXXPipeline
- all
- __call__
```
This will include every public method of the pipeline that is documented, as well as the `__call__` method that is not documented by default. If you want to document additional methods that are not included by default, add them to the list after `all`:
```
[[autodoc]] XXXPipeline
- all
- __call__
- enable_attention_slicing
- disable_attention_slicing
- enable_xformers_memory_efficient_attention
- disable_xformers_memory_efficient_attention
```
You can follow the same process to create a new scheduler under the `docs/source/<languageCode>/api/schedulers` folder.
### Writing source documentation
Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
and objects like `True`, `None`, or any strings should usually be put in `code`.
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.
If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`pipelines.ImagePipelineOutput\`\]. This will be converted into a link with
`pipelines.ImagePipelineOutput` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~pipelines.ImagePipelineOutput\`\] will generate a link with `ImagePipelineOutput` in the description.
The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
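For example, a docstring might combine both forms like this (an illustrative snippet, not taken from the library docs):
```
See [`DiffusionPipeline.from_pretrained`] for loading details; the output is a
[`~pipelines.ImagePipelineOutput`], which renders as just `ImagePipelineOutput`.
```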
#### Defining arguments in a method
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
description:
```
Args:
n_layers (`int`): The number of layers of the model.
```
If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.
Here's an example showcasing everything so far:
```
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
```
For optional arguments or arguments with defaults, we use the following syntax: imagine we have a function with the
following signature:
```py
def my_function(x: str = None, a: float = 3.14):
```
then its documentation should look like this:
```
Args:
x (`str`, *optional*):
This argument controls ...
a (`float`, *optional*, defaults to `3.14`):
This argument is used to ...
```
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).
#### Writing a multi-line code block
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
````
```
# first line of code
# second line
# etc
```
````
#### Writing a return block
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.
Here's an example of a single value return:
```
Returns:
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```
Here's an example of a tuple return, comprising several objects:
```
Returns:
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
    - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```
#### Adding an image
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If you are making an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate them
to this dataset.
## Styling the docstring
We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
easily.
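For example, assuming the development dependencies are installed, you would run the following from the root of the repository:
```bash
make style
```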
| 0
|
hf_public_repos/diffusers
|
hf_public_repos/diffusers/docs/TRANSLATING.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
### Translating the Diffusers documentation into your language
As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏.
**🗞️ Open an issue**
To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "🌐 Translating a New Language?" template from the "New issue" button.
Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.
**🍴 Fork the repository**
First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.
Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:
```bash
git clone https://github.com/<YOUR-USERNAME>/diffusers.git
```
**📋 Copy-paste the English version with a new language code**
The documentation files are all stored in one main directory:
- [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language.
You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:
```bash
cd ~/path/to/diffusers/docs
cp -r source/en source/<LANG-ID>
```
Here, `<LANG-ID>` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.
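For example, starting a Korean translation would look like (illustrative):
```bash
cp -r source/en source/ko
```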
**✍️ Start translating**
Now comes the fun part: translating the text!
The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.
> 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/<LANG-ID>/` directory!
The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml):
```yaml
- sections:
- local: pipeline_tutorial # Do not change this! Use the same name for your .md file
title: Pipelines for inference # Translate this!
...
title: Tutorials # Translate this!
```
Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.
> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
| 0
|
hf_public_repos/diffusers/docs
|
hf_public_repos/diffusers/docs/source/_config.py
|
# docstyle-ignore
INSTALL_CONTENT = """
# Diffusers installation
! pip install diffusers transformers datasets accelerate
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/diffusers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
| 0
|
hf_public_repos/diffusers/docs/source
|
hf_public_repos/diffusers/docs/source/ko/in_translation.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Translation in progress
We're working hard on this translation. Please check back soon.
Thank you!
| 0
|
hf_public_repos/diffusers/docs/source
|
hf_public_repos/diffusers/docs/source/ko/installation.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Installation
Install 🤗 Diffusers for whichever deep learning library you're working with.
🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:
- [PyTorch installation instructions](https://pytorch.org/get-started/locally/)
- [Flax installation instructions](https://flax.readthedocs.io/en/latest/)
## Install with pip
You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
Start by creating a virtual environment in your project directory:
```bash
python -m venv .env
```
Activate the virtual environment:
```bash
source .env/bin/activate
```
Now you're ready to install 🤗 Diffusers with the following command:
**For PyTorch**
```bash
pip install diffusers["torch"]
```
**For Flax**
```bash
pip install diffusers["flax"]
```
## Install from source
Before installing `diffusers` from source, make sure you have `torch` and `accelerate` installed.
For `torch` installation, refer to the `torch` [docs](https://pytorch.org/get-started/locally/#start-locally).
To install `accelerate`:
```bash
pip install accelerate
```
Install 🤗 Diffusers from source with the following command:
```bash
pip install git+https://github.com/huggingface/diffusers
```
This command installs the bleeding edge `main` version rather than the latest `stable` version.
The `main` version is useful for staying up-to-date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn't been rolled out yet.
However, this means the `main` version may not always be stable.
We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day.
If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
## Editable install
You will need an editable install if you'd like to:
* Use the `main` version of the source code.
* Contribute to 🤗 Diffusers and need to test changes in the code.
Clone the repository and install 🤗 Diffusers with the following commands:
```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```
**For PyTorch**
```
pip install -e ".[torch]"
```
**For Flax**
```
pip install -e ".[flax]"
```
These commands link the folder you cloned the repository to with your Python library paths.
Python will now look inside the folder you cloned to, in addition to the normal library paths.
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to.
<Tip warning={true}>
You must keep the `diffusers` folder if you want to keep using the library.
</Tip>
Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:
```bash
cd ~/diffusers/
git pull
```
Your Python environment will find the `main` version of 🤗 Diffusers on the next run.
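To quickly confirm which version your environment is actually picking up, you can check the package version (a small illustrative snippet, not part of the original guide):
```python
import diffusers
# An editable install from a git clone typically reports a ".dev0" development version.
print(diffusers.__version__)
```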
## Notice on telemetry logging
Our library gathers telemetry information during `from_pretrained()` requests.
This data includes the version of Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub.
This usage data helps us debug issues and prioritize new features.
Telemetry is only sent when loading models and pipelines from the HuggingFace Hub, and it is not collected during local usage.
We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal.
On Linux/MacOS:
```bash
export DISABLE_TELEMETRY=YES
```
On Windows:
```bash
set DISABLE_TELEMETRY=YES
```
| 0
|
hf_public_repos/diffusers/docs/source
|
hf_public_repos/diffusers/docs/source/ko/_toctree.yml
|
- sections:
  - local: index
    title: "🧨 Diffusers"
  - local: quicktour
    title: "Quicktour"
  - local: stable_diffusion
    title: Stable Diffusion
  - local: installation
    title: "Installation"
  title: "Get started"
- sections:
  - local: tutorials/tutorial_overview
    title: Overview
  - local: using-diffusers/write_own_pipeline
    title: Understanding models and schedulers
  - local: in_translation
    title: AutoPipeline
  - local: tutorials/basic_training
    title: Train a diffusion model
  title: Tutorials
- sections:
  - sections:
    - local: using-diffusers/loading_overview
      title: Overview
    - local: using-diffusers/loading
      title: Load pipelines, models, and schedulers
    - local: using-diffusers/schedulers
      title: Load and compare different schedulers
    - local: using-diffusers/custom_pipeline_overview
      title: Load community pipelines
    - local: using-diffusers/using_safetensors
      title: Load safetensors
    - local: using-diffusers/other-formats
      title: Load different Stable Diffusion formats
    - local: in_translation
      title: Push files to the Hub
    title: Loading & Hub
  - sections:
    - local: using-diffusers/pipeline_overview
      title: Overview
    - local: using-diffusers/unconditional_image_generation
      title: Unconditional image generation
    - local: using-diffusers/conditional_image_generation
      title: Text-to-image generation
    - local: using-diffusers/img2img
      title: Text-guided image-to-image
    - local: using-diffusers/inpaint
      title: Text-guided image inpainting
    - local: using-diffusers/depth2img
      title: Text-guided depth-to-image
    - local: using-diffusers/textual_inversion_inference
      title: Textual inversion
    - local: training/distributed_inference
      title: Distributed inference with multiple GPUs
    - local: in_translation
      title: Distilled Stable Diffusion inference
    - local: using-diffusers/reusing_seeds
      title: Improve image quality with deterministic generation
    - local: using-diffusers/control_brightness
      title: Control image brightness
    - local: using-diffusers/reproducibility
      title: Create reproducible pipelines
    - local: using-diffusers/custom_pipeline_examples
      title: Community pipelines
    - local: using-diffusers/contribute_pipeline
      title: How to contribute a community pipeline
    - local: using-diffusers/stable_diffusion_jax_how_to
      title: Stable Diffusion in JAX/Flax
    - local: using-diffusers/weighted_prompts
      title: Weighting Prompts
    title: Pipelines for inference
  - sections:
    - local: training/overview
      title: Overview
    - local: training/create_dataset
      title: Create a dataset for training
    - local: training/adapt_a_model
      title: Adapt a model to a new task
    - local: training/unconditional_training
      title: Unconditional image generation
    - local: training/text_inversion
      title: Textual Inversion
    - local: training/dreambooth
      title: DreamBooth
    - local: training/text2image
      title: Text-to-image
    - local: training/lora
      title: Low-Rank Adaptation of Large Language Models (LoRA)
    - local: training/controlnet
      title: ControlNet
    - local: training/instructpix2pix
      title: InstructPix2Pix training
    - local: training/custom_diffusion
      title: Custom Diffusion
    title: Training
  title: Using Diffusers
- sections:
  - local: optimization/opt_overview
    title: Overview
  - local: optimization/fp16
    title: Memory and speed
  - local: optimization/torch2.0
    title: Torch 2.0 support
  - local: optimization/xformers
    title: xFormers
  - local: optimization/onnx
    title: ONNX
  - local: optimization/open_vino
    title: OpenVINO
  - local: optimization/coreml
    title: Core ML
  - local: optimization/mps
    title: MPS
  - local: optimization/habana
    title: Habana Gaudi
  - local: optimization/tome
    title: Token Merging
  title: Optimization/Special hardware
- sections:
  - local: using-diffusers/controlling_generation
    title: Controlled generation
  - local: in_translation
    title: Evaluating diffusion models
  title: Conceptual guides
- sections:
  - sections:
    - sections:
      - local: api/pipelines/stable_diffusion/stable_diffusion_xl
        title: Stable Diffusion XL
      title: Stable Diffusion
    title: Pipelines
  title: API
| 0
|
hf_public_repos/diffusers/docs/source
|
hf_public_repos/diffusers/docs/source/ko/stable_diffusion.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Effective and efficient diffusion
[[open-in-colab]]
Getting the [`DiffusionPipeline`] to generate images in a certain style or include what you want can be tricky. Often, you have to run the [`DiffusionPipeline`] several times before you end up with an image you're happy with. But generating something out of nothing is a computationally intensive process, especially if you're running inference over and over again.
This is why it's important to get the most *computational* (speed) and *memory* (GPU RAM) efficiency from the pipeline, to reduce the time between inference cycles so you can iterate faster.
This tutorial walks you through how to generate faster and better with the [`DiffusionPipeline`].
Begin by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model:
```python
from diffusers import DiffusionPipeline
model_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id)
```
The example prompt you'll use is a portrait of an old warrior chief, but feel free to use your own prompt:
```python
prompt = "portrait photo of a old warrior chief"
```
## Speed
<Tip>
💡 If you don't have access to a GPU, you can use one for free from a GPU provider like [Colab](https://colab.research.google.com/)!
</Tip>
One of the simplest ways to speed up inference is to place the pipeline on a GPU the same way you would with any PyTorch module:
```python
pipeline = pipeline.to("cuda")
```
To make sure you can use the same image and improve on it, use a [`Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) and set a seed for [reproducibility](./using-diffusers/reproducibility):
```python
import torch
generator = torch.Generator("cuda").manual_seed(0)
```
Now you can generate an image:
```python
image = pipeline(prompt, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_1.png">
</div>
This process took ~30 seconds on a T4 GPU (it might be faster if your allocated GPU is better than a T4). By default, the [`DiffusionPipeline`] runs inference with full `float32` precision for 50 inference steps. You can speed this up by switching to a lower precision like `float16`, or by running fewer inference steps.
Let's load the model in `float16` and generate an image:
```python
import torch
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_2.png">
</div>
This time, it only took ~11 seconds to generate the image, which is almost 3x faster than before!
<Tip>
💡 We strongly suggest always running your pipelines in `float16`; so far, we've rarely seen any degradation in output quality.
</Tip>
Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the number of steps without sacrificing output quality. You can find which schedulers are compatible with the current model in the [`DiffusionPipeline`] by calling the `compatibles` method:
```python
pipeline.scheduler.compatibles
[
diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
diffusers.schedulers.scheduling_pndm.PNDMScheduler,
diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler,
diffusers.schedulers.scheduling_ddim.DDIMScheduler,
]
```
The Stable Diffusion model uses the [`PNDMScheduler`] by default, which usually requires ~50 inference steps, but more performant schedulers like [`DPMSolverMultistepScheduler`] require only ~20 or 25 inference steps. Use the [`ConfigMixin.from_config`] method to load a new scheduler:
```python
from diffusers import DPMSolverMultistepScheduler
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
```
Now set `num_inference_steps` to 20:
```python
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_3.png">
</div>
Great, you've managed to cut the inference time to just 4 seconds! ⚡️
## Memory
The other key to improving pipeline performance is consuming less memory, which indirectly implies more speed, since you're often trying to maximize the number of images generated per second. The easiest way to see how many images you can generate at once is to try out different batch sizes until you get an `OutOfMemoryError` (OOM).
Create a function that generates a batch of images from a list of prompts and `Generators`. Make sure to assign each `Generator` a seed so you can reuse it if it produces a good result.
```python
def get_inputs(batch_size=1):
generator = [torch.Generator("cuda").manual_seed(i) for i in range(batch_size)]
prompts = batch_size * [prompt]
num_inference_steps = 20
return {"prompt": prompts, "generator": generator, "num_inference_steps": num_inference_steps}
```
You'll also need a function that displays each batch of images:
```python
from PIL import Image
def image_grid(imgs, rows=2, cols=2):
w, h = imgs[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
```
Start with `batch_size=4` and see how much memory you've consumed:
```python
images = pipeline(**get_inputs(batch_size=4)).images
image_grid(images)
```
Unless you have a GPU with more RAM, the code above probably returned an `OOM` error! Most of the memory is taken up by the cross-attention layers. Instead of running this operation in a batch, you can run it sequentially to save a significant amount of memory. All you have to do is configure the pipeline with the [`~DiffusionPipeline.enable_attention_slicing`] function:
```python
pipeline.enable_attention_slicing()
```
Now try increasing the `batch_size` to 8!
```python
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_5.png">
</div>
Whereas before you couldn't even generate a batch of 4 images, now you can generate a batch of 8 images at ~3.5 seconds per image! This is probably the fastest you can go on a T4 GPU without sacrificing quality.
## Quality
In the last two sections, you learned how to optimize the speed of your pipeline by using `fp16`, reducing the number of inference steps with a more performant scheduler, and enabling attention slicing to reduce memory consumption. Now you're going to focus on how to improve the quality of the generated images.
### Better checkpoints
The most obvious step is to use a better checkpoint. The Stable Diffusion model is a good starting point, and several improved versions have been released since its official launch. However, using a newer version doesn't automatically mean you'll get better results. You'll still have to experiment with different checkpoints yourself and do a little research (such as using [negative prompts](https://minimaxir.com/2022/11/stable-diffusion-negative-prompt/)) to get the best results.
As the field grows, there are more and more high-quality checkpoints finetuned to produce certain styles. Try exploring the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) and [Diffusers Gallery](https://huggingface.co/spaces/huggingface-projects/diffusers-gallery) to find one you're interested in!
### Better pipeline components
You can also try replacing the current pipeline components with a newer version. Let's load the latest [autodecoder](https://huggingface.co/stabilityai/stable-diffusion-2-1/tree/main/vae) from Stability AI into the pipeline and generate some images:
```python
from diffusers import AutoencoderKL
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16).to("cuda")
pipeline.vae = vae
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_6.png">
</div>
### Better prompt engineering
The text prompt you use to generate an image is super important, so much so that it is called *prompt engineering*. Some considerations to keep in mind during prompt engineering are:
- How is the image, or images similar to the one I want to generate, stored on the internet?
- What additional detail can I give that steers the model towards the style I want?
With this in mind, let's improve the prompt to include color and higher quality details:
```python
prompt += ", tribal panther make up, blue on red, side profile, looking away, serious eyes"
prompt += " 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta"
```
Generate a batch of images with the new prompt:
```python
images = pipeline(**get_inputs(batch_size=8)).images
image_grid(images, rows=2, cols=4)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_7.png">
</div>
Pretty impressive! Let's tweak the second image, corresponding to the `Generator` with a seed of `1`, a bit more by adding some text about the age of the subject:
```python
prompts = [
"portrait photo of the oldest warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a old warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
"portrait photo of a young warrior chief, tribal panther make up, blue on red, side profile, looking away, serious eyes 50mm portrait photography, hard rim lighting photography--beta --ar 2:3 --beta --upbeta",
]
generator = [torch.Generator("cuda").manual_seed(1) for _ in range(len(prompts))]
images = pipeline(prompt=prompts, generator=generator, num_inference_steps=25).images
image_grid(images)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/stable_diffusion_101/sd_101_8.png">
</div>
## Next steps
In this tutorial, you learned how to optimize a [`DiffusionPipeline`] for computational and memory efficiency, and how to improve the quality of the generated outputs. If you're interested in making your pipeline even faster, take a look at the following resources:
- Learn how [PyTorch 2.0](./optimization/torch2.0) and [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) can yield 5-300% faster inference speed. On an A100 GPU, inference can be up to 50% faster!
- If you can't use PyTorch 2, we recommend installing [xFormers](./optimization/xformers). Its memory-efficient attention mechanism works great with PyTorch 1.13.1 for faster speed and reduced memory consumption.
- Other optimization techniques, such as model offloading, are covered in [this guide](./optimization/fp16).
| 0
|
hf_public_repos/diffusers/docs/source
|
hf_public_repos/diffusers/docs/source/ko/index.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
<p align="center">
<br>
<img src="https://raw.githubusercontent.com/huggingface/diffusers/77aadfee6a891ab9fcfb780f87c693f7a5beeb8e/docs/source/imgs/diffusers_library.jpg" width="400"/>
<br>
</p>
# Diffusers
🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstractions](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
The library has three main components:
- State-of-the-art [diffusion pipelines](api/pipelines/overview) for inference with just a few lines of code.
- Interchangeable [noise schedulers](api/schedulers/overview) for balancing trade-offs between generation speed and quality.
- Pretrained [models](api/models) that can be used as building blocks, and combined with schedulers, to create your own end-to-end diffusion systems.
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/tutorial_overview"
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div>
<p class="text-gray-700">Learn the fundamental skills you need to start generating outputs, build your own diffusion system, and train a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./using-diffusers/loading_overview"
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
<p class="text-gray-700">Practical guides for helping you load pipelines, models, and schedulers. You'll also learn how to use pipelines for specific tasks, control how outputs are generated, optimize for inference speed, and use different training techniques.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual/philosophy"
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
<p class="text-gray-700">Understand why the library was designed the way it was, and learn more about the ethical guidelines and safety implementations for using the library.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./api/models"
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
<p class="text-gray-700">Technical descriptions of how 🤗 Diffusers classes and methods work.</p>
</a>
</div>
</div>
## Supported pipelines
| Pipeline | Paper/Repository | Tasks |
|---|---|:---:|
| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing|
| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation Unconditional Image Generation |
| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) (implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
hf_public_repos/diffusers/docs/source/ko/quicktour.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Quicktour

Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you have probably seen examples of diffusion-generated images on the internet. ๐งจ Diffusers is a library aimed at making diffusion models broadly accessible to everyone.

Whether you're a developer or an everyday user, this quicktour will introduce you to ๐งจ Diffusers and help you get up and generating quickly! There are three main components of the library to know about:
* The [`DiffusionPipeline`] is a high-level, end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
* Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems.
* Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how denoised images are generated during inference.
The quicktour will show you how to use the [`DiffusionPipeline`] for inference, and then walk you through how to combine a model and a scheduler to replicate what's happening inside the [`DiffusionPipeline`].
<Tip>

The quicktour is a simplified version of the introductory ๐งจ Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about ๐งจ Diffusers' goals, design philosophy, and additional details about its core API, check out the notebook!

</Tip>

Before you begin, make sure you have all the necessary libraries installed:
```py
# uncomment to install the necessary libraries in Colab
#!pip install --upgrade diffusers accelerate transformers
```
- [๐ค Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training.
- [๐ค Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).
## DiffusionPipeline
The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table below for some supported tasks, and for the complete list of supported tasks, check out the [๐งจ Diffusers Summary](./api/pipelines/overview#diffusers-summary) table.
| **Task** | **Description** | **Pipeline** |
|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|
| Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
| Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) |
| Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) |
| Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) |
| Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) |
Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download.
You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub.
In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image generation.
<Tip warning={true}>

For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) before running the model. ๐งจ Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content.

</Tip>

Load the model with the [`~DiffusionPipeline.from_pretrained`] method:
```python
>>> from diffusers import DiffusionPipeline
>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```
The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`] among other things:
```py
>>> pipeline
StableDiffusionPipeline {
"_class_name": "StableDiffusionPipeline",
"_diffusers_version": "0.13.1",
...,
"scheduler": [
"diffusers",
"PNDMScheduler"
],
...,
"unet": [
"diffusers",
"UNet2DConditionModel"
],
"vae": [
"diffusers",
"AutoencoderKL"
]
}
```
We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters.
You can move the generator object to a GPU, just like you would in PyTorch:
```python
>>> pipeline.to("cuda")
```
Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.
```python
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
</div>
Save the image by calling `save`:
```python
>>> image.save("image_of_squirrel_painting.png")
```
### Local pipeline

You can also use the pipeline locally. The only difference is you need to download the weights first:
```bash
!git lfs install
!git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```
Then load the saved weights into the pipeline:
```python
>>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
```
Now you can run the pipeline just like you did in the section above.

### Swapping schedulers

Different schedulers come with different denoising speeds and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of ๐งจ Diffusers is to allow you to easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method:
```py
>>> from diffusers import EulerDiscreteScheduler
>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
```
Try generating an image with the new scheduler and see if you notice a difference!
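For example, a minimal sketch reusing the squirrel prompt from earlier (your results will differ from the PNDM image since a different sampler is now in use):

```py
>>> pipeline.to("cuda")
>>> image = pipeline("An image of a squirrel in Picasso style").images[0]
>>> image.save("image_of_squirrel_painting_euler.png")
```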
In the next section, you'll take a closer look at the components - the model and the scheduler - that make up the [`DiffusionPipeline`], and learn how to use these components to generate an image of a cat.
## Models

Most models take a noisy sample, and at each timestep predict the *noise residual*, the difference between a less noisy image and the input image (other models learn to predict the previous sample directly, or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)). You can mix and match models to create other diffusion systems.

Models are initiated with the [`~ModelMixin.from_pretrained`] method, which also locally caches the model weights so it is faster the next time you load the model. For the quicktour, you'll load [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images:
```py
>>> from diffusers import UNet2DModel
>>> repo_id = "google/ddpm-cat-256"
>>> model = UNet2DModel.from_pretrained(repo_id)
```
To access the model parameters, call `model.config`:
```py
>>> model.config
```
The model configuration is a ๐ง frozen ๐ง dictionary, which means those parameters can't be changed after the model is created. This is intentional and ensures that the parameters used to define the model architecture at the start remain the same, while other parameters can still be adjusted during inference.
Some of the most important parameters are (a short sketch of reading them off the config follows the list):

* `sample_size`: the height and width dimension of the input sample.
* `in_channels`: the number of input channels of the input sample.
* `down_block_types` and `up_block_types`: the types of down- and upsampling blocks used to create the UNet architecture.
* `block_out_channels`: the number of output channels of the downsampling blocks, also used in reverse order for the number of input channels of the upsampling blocks.
* `layers_per_block`: the number of ResNet blocks present in each UNet block.
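Since these are plain attributes on the frozen config, you can read them directly; for the cat checkpoint loaded above:

```py
>>> model.config.sample_size
256
>>> model.config.in_channels
3
```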
To use the model for inference, create the image shape with random Gaussian noise. It should have a `batch` axis because the model can receive multiple random noises, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image:
```py
>>> import torch
>>> torch.manual_seed(0)
>>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
>>> noisy_sample.shape
torch.Size([1, 3, 256, 256])
```
For inference, pass the noisy image to the model along with a `timestep`. The `timestep` indicates how noisy the input image is, with more noise at the beginning and less at the end. This helps the model determine its position in the diffusion process, whether it is closer to the start or the end. Use the `sample` method to get the model output:
```py
>>> with torch.no_grad():
...     noisy_residual = model(sample=noisy_sample, timestep=2).sample
```
To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler.

## Schedulers

Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, the `noisy_residual`.
<Tip>

๐งจ Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a pre-built diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system.

</Tip>

For the quicktour, you'll instantiate the [`DDPMScheduler`] with its [`~diffusers.ConfigMixin.from_config`] method:
```py
>>> from diffusers import DDPMScheduler
>>> scheduler = DDPMScheduler.from_config(repo_id)
>>> scheduler
DDPMScheduler {
"_class_name": "DDPMScheduler",
"_diffusers_version": "0.13.1",
"beta_end": 0.02,
"beta_schedule": "linear",
"beta_start": 0.0001,
"clip_sample": true,
"clip_sample_range": 1.0,
"num_train_timesteps": 1000,
"prediction_type": "epsilon",
"trained_betas": null,
"variance_type": "fixed_small"
}
```
<Tip>

๐ก Notice how the scheduler is instantiated from a configuration. Unlike a model, a scheduler does not have trainable weights and is parameter-free!

</Tip>

Some of the most important parameters are (see the short sketch after the list):
* `num_train_timesteps`: the length of the denoising process, or in other words, the number of timesteps required to process random Gaussian noise into a data sample.
* `beta_schedule`: the type of noise schedule to use for inference and training.
* `beta_start` and `beta_end`: the start and end noise values for the noise schedule.
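As with the model, these live on the scheduler's config; for the configuration printed above:

```py
>>> scheduler.config.num_train_timesteps
1000
>>> scheduler.config.beta_schedule
'linear'
```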
To predict a slightly less noisy image, pass the model output, `timestep`, and the current `sample` to the scheduler's [`~diffusers.DDPMScheduler.step`] method:
```py
>>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample
>>> less_noisy_sample.shape
```
The `less_noisy_sample` can be passed to the next `timestep` where it'll get even less noisy! Let's bring it all together now and visualize the entire denoising process.

First, create a function that postprocesses and displays the denoised image as a `PIL.Image`:
```py
>>> import PIL.Image
>>> import numpy as np
>>> def display_sample(sample, i):
...     image_processed = sample.cpu().permute(0, 2, 3, 1)
...     image_processed = (image_processed + 1.0) * 127.5
...     image_processed = image_processed.numpy().astype(np.uint8)
...     image_pil = PIL.Image.fromarray(image_processed[0])
...     display(f"Image at step {i}")
...     display(image_pil)
```
To speed up the denoising process, move the input and the model to a GPU:
```py
>>> model.to("cuda")
>>> noisy_sample = noisy_sample.to("cuda")
```
Now create a denoising loop that predicts the residual of the less noisy sample, and computes the less noisy sample with the scheduler:
```py
>>> import tqdm
>>> sample = noisy_sample
>>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
...     # 1. predict noise residual
...     with torch.no_grad():
...         residual = model(sample, t).sample
...     # 2. compute less noisy image and set x_t -> x_t-1
...     sample = scheduler.step(residual, t, sample).prev_sample
...     # 3. optionally look at image
...     if (i + 1) % 50 == 0:
...         display_sample(sample, i + 1)
```
Sit back and watch as a cat is generated from nothing but noise! ๐ป
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/>
</div>
## Next steps

Hopefully you generated some cool images with ๐งจ Diffusers in this quicktour! For your next steps, you can:

* Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial.
* See example official and community [training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) for a variety of use cases.
* Learn more about loading, accessing, changing, and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide.
* Explore prompt engineering, speed and memory optimizations, and tips and tricks for generating higher-quality images in the [Stable Diffusion](./stable_diffusion) guide.
* Dive deeper into speeding up ๐งจ Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
hf_public_repos/diffusers/docs/source/ko/tutorials/tutorial_overview.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
Welcome to ๐งจ Diffusers! If you're new to diffusion models and generative AI, and you want to learn more, then you've come to the right place. These tutorials are designed to provide a gentle introduction to diffusion models and to help you understand the library fundamentals - the core components and how ๐งจ Diffusers is meant to be used.

You'll learn how to use a pipeline for inference to rapidly generate things, and then deconstruct that pipeline to really understand how to use the library as a modular toolbox for building your own diffusion systems. In the next lesson, you'll learn how to train your own diffusion model to generate what you want.

After completing the tutorials, you'll have gained the necessary skills to start exploring the library on your own and to see how to use it for your own projects and applications.

Feel free to join our community on [Discord](https://discord.com/invite/JfAtkvEtRb) or the [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) to connect and collaborate with other users and developers!

Let's start diffusing! ๐งจ
hf_public_repos/diffusers/docs/source/ko/tutorials/basic_training.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Train a diffusion model

Unconditional image generation is a popular application of diffusion models that generates images resembling those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own!

This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own ๐ฆ butterflies ๐ฆ.
<Tip>

๐ก This training tutorial is based on the [Training with ๐งจ Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models, such as how they work, check out the notebook!

</Tip>

Before you begin, make sure you have ๐ค Datasets installed to load and preprocess image datasets, and ๐ค Accelerate installed to speed up training on GPUs. Also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training).
```bash
!pip install diffusers[training]
```
We encourage you to share your model with the community; to do so, you'll need to log in to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can log in from a notebook and enter your token when prompted:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
Or log in from the terminal:
```bash
huggingface-cli login
```
Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files:
```bash
!sudo apt -qq install git-lfs
!git config --global credential.helper store
```
## Training configuration

For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them):
```py
>>> from dataclasses import dataclass
>>> @dataclass
... class TrainingConfig:
...     image_size = 128  # the generated image resolution
...     train_batch_size = 16
...     eval_batch_size = 16  # how many images to sample during evaluation
...     num_epochs = 50
...     gradient_accumulation_steps = 1
...     learning_rate = 1e-4
...     lr_warmup_steps = 500
...     save_image_epochs = 10
...     save_model_epochs = 30
...     mixed_precision = "fp16"  # `no` for float32, `fp16` for automatic mixed precision
...     output_dir = "ddpm-butterflies-128"  # the model name locally and on the HF Hub
...     push_to_hub = True  # whether to upload the saved model to the HF Hub
...     hub_model_id = None  # optionally a full repo id such as "<user>/<repo>"; defaults to the output_dir name (referenced later in train_loop)
...     hub_private_repo = False
...     overwrite_output_dir = True  # overwrite the old model when re-running the notebook
...     seed = 0
>>> config = TrainingConfig()
```
## Load the dataset

You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the ๐ค Datasets library:
```py
>>> from datasets import load_dataset
>>> config.dataset_name = "huggan/smithsonian_butterflies_subset"
>>> dataset = load_dataset(config.dataset_name, split="train")
```
๐ก You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan), or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or to `imagefolder` if you're using your own images.

๐ค Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html), which you can visualize:
```py
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
>>> for i, image in enumerate(dataset[:4]["image"]):
...     axs[i].imshow(image)
...     axs[i].set_axis_off()
>>> fig.show()
```

The images are all different sizes though, so you'll need to preprocess them first:

- `Resize` changes the image size to the one defined in `config.image_size`.
- `RandomHorizontalFlip` augments the dataset by randomly mirroring the images.
- `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects.
```py
>>> from torchvision import transforms
>>> preprocess = transforms.Compose(
...     [
...         transforms.Resize((config.image_size, config.image_size)),
...         transforms.RandomHorizontalFlip(),
...         transforms.ToTensor(),
...         transforms.Normalize([0.5], [0.5]),
...     ]
... )
```
Use ๐ค Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training:
```py
>>> def transform(examples):
...     images = [preprocess(image.convert("RGB")) for image in examples["image"]]
...     return {"images": images}
>>> dataset.set_transform(transform)
```
Feel free to visualize the images again to confirm that they've been resized. One way to do that is sketched below; the transformed tensors are normalized to [-1, 1], so they are shifted back to [0, 1] for display:
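```py
>>> fig, axs = plt.subplots(1, 4, figsize=(16, 4))
>>> for i, image in enumerate(dataset[:4]["images"]):
...     axs[i].imshow(image.permute(1, 2, 0).numpy() / 2 + 0.5)
...     axs[i].set_axis_off()
>>> fig.show()
```

Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training!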
```py
>>> import torch
>>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)
```
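As a quick sanity check, you can pull one batch and confirm that its shape matches the configuration above (a sketch; `16` is `config.train_batch_size` and `128` is `config.image_size`):

```py
>>> batch = next(iter(train_dataloader))
>>> batch["images"].shape
torch.Size([16, 3, 128, 128])
```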
## Create a UNet2DModel

Pretrained models in ๐งจ Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]:
```py
>>> from diffusers import UNet2DModel
>>> model = UNet2DModel(
...     sample_size=config.image_size,  # the target image resolution
...     in_channels=3,  # the number of input channels, 3 for RGB images
...     out_channels=3,  # the number of output channels
...     layers_per_block=2,  # how many ResNet layers to use per UNet block
...     block_out_channels=(128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
...     down_block_types=(
...         "DownBlock2D",  # a regular ResNet downsampling block
...         "DownBlock2D",
...         "DownBlock2D",
...         "DownBlock2D",
...         "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
...         "DownBlock2D",
...     ),
...     up_block_types=(
...         "UpBlock2D",  # a regular ResNet upsampling block
...         "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
...         "UpBlock2D",
...         "UpBlock2D",
...         "UpBlock2D",
...         "UpBlock2D",
...     ),
... )
```
It is often a good idea to quickly check that the sample image shape matches the model output shape:
```py
>>> sample_image = dataset[0]["images"].unsqueeze(0)
>>> print("Input shape:", sample_image.shape)
Input shape: torch.Size([1, 3, 128, 128])
>>> print("Output shape:", model(sample_image, timestep=0).sample.shape)
Output shape: torch.Size([1, 3, 128, 128])
```
Great! Next, you'll need a scheduler to add some noise to the image.

## Create a scheduler
The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates an image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*.

Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before:
```py
>>> import torch
>>> from PIL import Image
>>> from diffusers import DDPMScheduler
>>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
>>> noise = torch.randn(sample_image.shape)
>>> timesteps = torch.LongTensor([50])
>>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)
>>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])
```

The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by:
```py
>>> import torch.nn.functional as F
>>> noise_pred = model(noisy_image, timesteps).sample
>>> loss = F.mse_loss(noise_pred, noise)
```
## Train the model

By now, you have most of the pieces needed to start training the model, and all that's left is putting everything together.

First, you'll need an optimizer and a learning rate scheduler:
```py
>>> from diffusers.optimization import get_cosine_schedule_with_warmup
>>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
>>> lr_scheduler = get_cosine_schedule_with_warmup(
...     optimizer=optimizer,
...     num_warmup_steps=config.lr_warmup_steps,
...     num_training_steps=(len(train_dataloader) * config.num_epochs),
... )
```
Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save them as a grid:
```py
>>> from diffusers import DDPMPipeline
>>> import math
>>> import os
>>> def make_grid(images, rows, cols):
...     w, h = images[0].size
...     grid = Image.new("RGB", size=(cols * w, rows * h))
...     for i, image in enumerate(images):
...         grid.paste(image, box=(i % cols * w, i // cols * h))
...     return grid
>>> def evaluate(config, epoch, pipeline):
...     # Sample some images from random noise (this is the backward diffusion process).
...     # The default pipeline output type is `List[PIL.Image]`
...     images = pipeline(
...         batch_size=config.eval_batch_size,
...         generator=torch.manual_seed(config.seed),
...     ).images
...     # Make a grid out of the images
...     image_grid = make_grid(images, rows=4, cols=4)
...     # Save the images
...     test_dir = os.path.join(config.output_dir, "samples")
...     os.makedirs(test_dir, exist_ok=True)
...     image_grid.save(f"{test_dir}/{epoch:04d}.png")
```
Now you can wrap all these components together in a training loop with ๐ค Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information, and then push it to the Hub.

๐ก The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. ๐ค
```py
>>> from accelerate import Accelerator
>>> from huggingface_hub import create_repo, upload_folder
>>> from tqdm.auto import tqdm
>>> from pathlib import Path
>>> import os
>>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
...     # Initialize accelerator and tensorboard logging
...     accelerator = Accelerator(
...         mixed_precision=config.mixed_precision,
...         gradient_accumulation_steps=config.gradient_accumulation_steps,
...         log_with="tensorboard",
...         project_dir=os.path.join(config.output_dir, "logs"),
...     )
...     if accelerator.is_main_process:
...         if config.output_dir is not None:
...             os.makedirs(config.output_dir, exist_ok=True)
...         if config.push_to_hub:
...             repo_id = create_repo(
...                 repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True
...             ).repo_id
...         accelerator.init_trackers("train_example")
...     # Prepare everything
...     # There is no specific order to remember, you just need to unpack the
...     # objects in the same order you gave them to the prepare method.
...     model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
...         model, optimizer, train_dataloader, lr_scheduler
...     )
...     global_step = 0
...     # Now you train the model
...     for epoch in range(config.num_epochs):
...         progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
...         progress_bar.set_description(f"Epoch {epoch}")
...         for step, batch in enumerate(train_dataloader):
...             clean_images = batch["images"]
...             # Sample noise to add to the images
...             noise = torch.randn(clean_images.shape, device=clean_images.device)
...             bs = clean_images.shape[0]
...             # Sample a random timestep for each image
...             timesteps = torch.randint(
...                 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device,
...                 dtype=torch.int64
...             )
...             # Add noise to the clean images according to the noise magnitude at each timestep
...             # (this is the forward diffusion process)
...             noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
...             with accelerator.accumulate(model):
...                 # Predict the noise residual
...                 noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
...                 loss = F.mse_loss(noise_pred, noise)
...                 accelerator.backward(loss)
...                 accelerator.clip_grad_norm_(model.parameters(), 1.0)
...                 optimizer.step()
...                 lr_scheduler.step()
...                 optimizer.zero_grad()
...             progress_bar.update(1)
...             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
...             progress_bar.set_postfix(**logs)
...             accelerator.log(logs, step=global_step)
...             global_step += 1
...         # After each epoch you optionally sample some demo images with evaluate() and save the model
...         if accelerator.is_main_process:
...             pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
...             if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
...                 evaluate(config, epoch, pipeline)
...             if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
...                 if config.push_to_hub:
...                     upload_folder(
...                         repo_id=repo_id,
...                         folder_path=config.output_dir,
...                         commit_message=f"Epoch {epoch}",
...                         ignore_patterns=["step_*", "epoch_*"],
...                     )
...                 else:
...                     pipeline.save_pretrained(config.output_dir)
```
Phew, that was quite a bit of code! But you're finally ready to launch the training with ๐ค Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training:
```py
>>> from accelerate import notebook_launcher
>>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)
>>> notebook_launcher(train_loop, args, num_processes=1)
```
Once training is complete, take a look at the final ๐ฆ images ๐ฆ generated by your diffusion model!
```py
>>> import glob
>>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png"))
>>> Image.open(sample_images[-1])
```

## Next steps

Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [๐งจ Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn:

* [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image.
* [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject.
* [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset.
* [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
hf_public_repos/diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# ๐งจ Stable Diffusion in JAX / Flax!

[[open-in-colab]]

๐ค Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version 0.5.1! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle, or Google Cloud Platform.

This notebook shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works, or want to run it on a GPU, please refer to [this notebook](https://huggingface.co/docs/diffusers/stable_diffusion).

First, make sure you are using a TPU backend. If you are running this notebook in Colab, select Runtime in the menu, then select the "Change runtime type" option, and then select TPU under the Hardware accelerator setting.

Note that JAX is not exclusive to TPUs, but it shines on that hardware because each TPU server has 8 TPU accelerators working in parallel.
## Setup
First make sure diffusers is installed.
```bash
!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
!pip install diffusers
```
```python
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
import jax
```
```python
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind
print(f"Found {num_devices} JAX devices of type {device_type}.")
assert (
"TPU" in device_type
), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
```
```python out
Found 8 JAX devices of type Cloud TPU.
```
Then we import all the dependencies.
```python
import numpy as np
import jax
import jax.numpy as jnp
from pathlib import Path
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image
from huggingface_hub import notebook_login
from diffusers import FlaxStableDiffusionPipeline
```
## Load the model

TPU devices support `bfloat16`, an efficient half-float type. We'll use it for our tests, but you can also use `float32` to run with full precision instead.
```python
dtype = jnp.bfloat16
```
Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading the pretrained Flax pipeline returns both the pipeline itself and the model weights (or parameters). We are using a bf16 version of the weights, which leads to type warnings that you can safely ignore.
```python
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
revision="bf16",
dtype=dtype,
)
```
## Inference

Since TPUs usually have 8 devices working in parallel, we'll replicate our prompt as many times as devices we have. Then we'll perform inference on the 8 devices at once, each responsible for generating one image. Thus, we'll get 8 images in the same amount of time it takes one chip to generate a single one.

After replicating the prompt, we obtain the tokenized text ids by invoking the `prepare_inputs` function of the pipeline. The length of the tokenized text is set to 77 tokens, as per the configuration of the underlying CLIP text model.
```python
prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
prompt = [prompt] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids.shape
```
```python out
(8, 77)
```
### Replication and parallelization

Model parameters and inputs have to be replicated across the 8 parallel devices we have. The parameters dictionary is replicated using `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
```python
p_params = replicate(params)
```
```python
prompt_ids = shard(prompt_ids)
prompt_ids.shape
```
```python out
(8, 1, 77)
```
That shape means that each one of the 8 devices will receive as an input a jnp array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, it could be larger than 1 if we wanted to generate multiple images (per chip) at once.

We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is the standard procedure in Flax, which is very serious and opinionated about random numbers: all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when we're training across multiple distributed devices.

The helper function below uses a seed to initialize a random number generator. As long as we use the same seed, we'll get the exact same results. Feel free to use different seeds when exploring results later in the notebook.
```python
def create_key(seed=0):
    return jax.random.PRNGKey(seed)
```
We obtain the rng and then "split" it 8 times so each device receives a different generator. Therefore, each device will create a different image, and the full process is reproducible.
```python
rng = create_key(0)
rng = jax.random.split(rng, jax.device_count())
```
JAX code can be compiled to an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX will have to recompile the code, and we wouldn't be able to take advantage of the optimized speed.

The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It will also ensure that the model runs in parallel on the 8 available devices.

The first time we run the following cell it will take a long time to compile, but subsequent calls (even with different inputs) will be much faster. For example, it took more than a minute to compile on a TPU v2-8 when we tested, but then subsequent inference runs take about 7s.
```
%%time
images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
```
```python out
CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
Wall time: 1min 29s
```
The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension and obtain 8 images of 512 × 512 × 3, and then convert them to PIL.
```python
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
```
### Visualization

Let's create a helper function to display images in a grid.
```python
def image_grid(imgs, rows, cols):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```
```python
image_grid(images, 2, 4)
```

## Using different prompts

We don't have to replicate the same prompt in all the devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once. Let's try it!

First, we'll refactor the input preparation code into a handy function:
```python
prompts = [
"Labrador in the style of Hokusai",
"Painting of a squirrel skating in New York",
"HAL-9000 in the style of Van Gogh",
"Times Square under water, with fish and a dolphin swimming around",
"Ancient Roman fresco showing a man working on his laptop",
"Close-up photograph of young black woman against urban background, high quality, bokeh",
"Armchair in the shape of an avocado",
"Clown astronaut in space, with Earth in the background",
]
```
```python
prompt_ids = pipeline.prepare_inputs(prompts)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, rng, jit=True).images
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
image_grid(images, 2, 4)
```

## How does parallelization work?

We said before that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. We'll now briefly look inside that process to show how it works.

JAX parallelization can be done in multiple ways. The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization. It means we'll run several copies of the same code, each on different data inputs. More sophisticated approaches are possible; we invite you to go over the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit) to explore this topic if you are interested!
`jax.pmap` does two things for us:
- Compiles (or `jit`s) the code, as if we had invoked `jax.jit()`. This does not happen when we call `pmap`, but the first time the pmapped function is invoked.
- Ensures the compiled code runs in parallel on all available devices.

To show how it works, we `pmap` the `_generate` method of the pipeline, which is the private method that runs image generation. Please note that this method may be renamed or removed in future releases of `diffusers`.
```python
p_generate = pmap(pipeline._generate)
```
After using `pmap`, the prepared function `p_generate` will conceptually do the following:

* Invoke a copy of the underlying function `pipeline._generate` on each device.
* Send each device a different portion of the input arguments. That's what sharding is used for. In our case, `prompt_ids` has shape `(8, 1, 77, 768)`, so the array is split into 8, and each copy of `_generate` receives an input with shape `(1, 77, 768)`.

We can code `_generate` completely ignoring the fact that it will be invoked in parallel. We just care about our batch size (`1` in this example) and the dimensions that make sense for our code, and don't have to change anything for it to work in parallel.
The same way as when we used the pipeline call, the first time we run the following cell it will take a while, but subsequent calls will be much faster.
```
%%time
images = p_generate(prompt_ids, p_params, rng)
images = images.block_until_ready()
images.shape
```
```python out
CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
Wall time: 1min 15s
```
```python
images.shape
```
```python out
(8, 1, 512, 512, 3)
```
We use `block_until_ready()` to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking happens automatically when you use the result of a computation that has not yet been materialized.
hf_public_repos/diffusers/docs/source/ko/using-diffusers/unconditional_image_generation.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Unconditional image generation

[[open-in-colab]]

Unconditional image generation is a relatively straightforward task. The model only generates images resembling its training data, without any additional conditioning like text or another image.

The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference.

Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) you would like to download. You can use any of the ๐งจ Diffusers checkpoints from the Hub (the checkpoint you'll use here generates images of butterflies).
<Tip>

๐ก Want to train your own unconditional image generation model? Take a look at the training guide to learn how to generate your own images.

</Tip>

In this guide, you'll use the [`DiffusionPipeline`] for unconditional image generation with [DDPM](https://arxiv.org/abs/2006.11239):
```python
>>> from diffusers import DiffusionPipeline
>>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128")
```
The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU. You can move the generator object to a GPU, just like you would in PyTorch:
```python
>>> generator.to("cuda")
```
Now you can use the generator to generate an image:
```python
>>> image = generator().images[0]
```
The output is wrapped into a [PIL.Image](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object by default.

You can save the image by calling:
```python
>>> image.save("generated_image.png")
```
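The pipeline call also accepts parameters such as `num_inference_steps`, the number of denoising steps; a minimal sketch:

```python
>>> image = generator(num_inference_steps=100).images[0]
>>> image.save("generated_image_100_steps.png")
```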
Try out the Space below, and feel free to play around with the inference steps parameter to see how it affects the image quality!
<iframe src="https://stevhliu-ddpm-butterflies-128.hf.space" frameborder="0" width="850" height="500"></iframe>
hf_public_repos/diffusers/docs/source/ko/using-diffusers/reproducibility.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Create reproducible pipelines

[[open-in-colab]]

Reproducibility is important for testing, replicating results, and can even be used to [improve image quality](reusing_seeds). However, the randomness in diffusion models is a desired property because it allows the pipeline to generate different images every time it is run. While you can't expect to get exactly the same results across platforms, you can expect results to be reproducible across releases and platforms within a certain tolerance range. Even then, the tolerance varies depending on the diffusion pipeline and checkpoint. This is why it's important to understand how to control sources of randomness in diffusion models, or how to use deterministic algorithms.
<Tip>

๐ก We strongly recommend reading PyTorch's [statement about reproducibility](https://pytorch.org/docs/stable/notes/randomness.html):

> Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds.

</Tip>
## Control randomness

During inference, pipelines rely heavily on random sampling operations, which include creating the Gaussian noise tensors to denoise and adding noise to the scheduling step.

Take a look at the tensor values in the [DDIMPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/ddim#diffusers.DDIMPipeline) after two inference steps:
```python
from diffusers import DDIMPipeline
import numpy as np
model_id = "google/ddpm-cifar10-32"
# load model and scheduler
ddim = DDIMPipeline.from_pretrained(model_id)
# run pipeline for just two steps and return numpy tensor
image = ddim(num_inference_steps=2, output_type="np").images
print(np.abs(image).sum())
```
Running the code above prints one value, but if you run it again you get a different value. What is going on here?

Every time the pipeline is run, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html) uses a different random seed to create the Gaussian noise tensors that are denoised stepwise. This leads to a different result each time. But if you need to reliably generate the same image, that depends on whether you're running the pipeline on a CPU or a GPU.
### CPU
To generate reproducible results on a CPU, you'll need to use a PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.randn.html) and set a seed:
```python
import torch
from diffusers import DDIMPipeline
import numpy as np
model_id = "google/ddpm-cifar10-32"
# load model and scheduler
ddim = DDIMPipeline.from_pretrained(model_id)
# create a generator for reproducibility
generator = torch.Generator(device="cpu").manual_seed(0)
# run pipeline for just two steps and return numpy tensor
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```
Now when you run the code above, it always prints a value of `1491.1711` because the `Generator` object with the seed is passed to all the random functions in the pipeline. You should get a similar, if not the same, result on your particular hardware and PyTorch version.
<Tip>

๐ก It might be a bit unintuitive at first to pass `Generator` objects to the pipeline instead of just integer values representing the seed, but this is the recommended design when dealing with probabilistic models in PyTorch, because a `Generator` is a *random state* that can be passed to multiple pipelines in a sequence.

</Tip>
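In practice, this means re-seeding (or re-creating) the `Generator` before every call that should be reproducible. A quick sketch reusing the `ddim` pipeline from above:

```python
# seed the generator, run, then re-seed and run again
generator = torch.Generator(device="cpu").manual_seed(0)
first = ddim(num_inference_steps=2, output_type="np", generator=generator).images

generator = torch.Generator(device="cpu").manual_seed(0)
second = ddim(num_inference_steps=2, output_type="np", generator=generator).images

# both runs consumed the same random state, so the difference is zero on CPU
print(np.abs(first - second).sum())
```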
### GPU
For example, if you run the same code example above on a GPU:
```python
import torch
from diffusers import DDIMPipeline
import numpy as np
model_id = "google/ddpm-cifar10-32"
# load model and scheduler
ddim = DDIMPipeline.from_pretrained(model_id)
ddim.to("cuda")
# create a generator for reproducibility
generator = torch.Generator(device="cuda").manual_seed(0)
# run pipeline for just two steps and return numpy tensor
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```
The result is not the same even though you're using an identical seed, because the GPU uses a different random number generator than the CPU.

To circumvent this problem, ๐งจ Diffusers has a [randn_tensor()](https://huggingface.co/docs/diffusers/v0.18.0/en/api/utilities#diffusers.utils.randn_tensor) function for creating random noise on the CPU and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, so you can **always** pass a CPU `Generator`, even if the pipeline is run on a GPU.

You'll see the results are much closer now!
```python
import torch
from diffusers import DDIMPipeline
import numpy as np
model_id = "google/ddpm-cifar10-32"
# load model and scheduler
ddim = DDIMPipeline.from_pretrained(model_id)
ddim.to("cuda")
# create a generator for reproducibility; notice you don't place it on the GPU!
generator = torch.manual_seed(0)
# run pipeline for just two steps and return numpy tensor
image = ddim(num_inference_steps=2, output_type="np", generator=generator).images
print(np.abs(image).sum())
```
<Tip>

๐ก If reproducibility is important, we recommend always passing a CPU generator. The performance loss is often negligible, and you'll generate much more similar values than if the pipeline had been run on a GPU.

</Tip>
Finally, more complex pipelines such as the [UnCLIPPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/unclip#diffusers.UnCLIPPipeline) are often extremely susceptible to precision error propagation. Don't expect similar results across different GPU hardware or PyTorch versions. In this case, you'll need to run exactly the same hardware and PyTorch version for full reproducibility.
## Deterministic algorithms

You can also configure PyTorch to use deterministic algorithms to create a reproducible pipeline. However, you should be aware that deterministic algorithms may be slower than non-deterministic ones, and you may observe a decrease in performance. But if reproducibility is important to you, then this is the way to go!

Non-deterministic behavior occurs when operations are launched in more than one CUDA stream. To avoid this, set the environment variable [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility) to `:16:8` to only use one buffer size during runtime.

PyTorch typically benchmarks multiple algorithms to select the fastest one, but if you want reproducibility, you should disable this feature because the benchmark may select different algorithms each time. Lastly, pass `True` to [torch.use_deterministic_algorithms](https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html) to enable deterministic algorithms.
```py
import os
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
```
Now when you run the same pipeline twice, you'll get identical results.
```py
import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline
import numpy as np
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
g = torch.Generator(device="cuda")
prompt = "A bear is playing a guitar on Times Square"
g.manual_seed(0)
result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images
g.manual_seed(0)
result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images
print("L_inf dist = ", abs(result1 - result2).max())
"L_inf dist = tensor(0., device='cuda:0')"
```
hf_public_repos/diffusers/docs/source/ko/using-diffusers/control_brightness.md
# Control image brightness

The Stable Diffusion pipeline is mediocre at generating images that are either very bright or very dark, as explained in the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) paper. The solutions proposed in the paper are currently implemented in the [`DDIMScheduler`], which you can use to improve the lighting in your images.
<Tip>

๐ก Take a look at the paper linked above for more details about the proposed solutions!

</Tip>

One of the solutions is to train a model with *v prediction* and *v loss*. Add the following flag to the [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts to enable `v_prediction`:
```bash
--prediction_type="v_prediction"
```
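For example, a hypothetical invocation; every argument other than `--prediction_type` is a placeholder for your own setup:

```bash
accelerate launch train_text_to_image.py \
  --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
  --dataset_name="your-dataset" \
  --output_dir="sd-v-prediction" \
  --prediction_type="v_prediction"
```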
For example, let's use the [`ptx0/pseudo-journey-v2`](https://huggingface.co/ptx0/pseudo-journey-v2) checkpoint, which has been finetuned with `v_prediction`.

Next, configure the following parameters in the [`DDIMScheduler`]:

1. `rescale_betas_zero_snr=True`, which rescales the noise schedule to a zero terminal signal-to-noise ratio (SNR)
2. `timestep_spacing="trailing"`, which starts sampling from the last timestep
```py
>>> from diffusers import DiffusionPipeline, DDIMScheduler
>>> pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2")
# switch the scheduler in the pipeline to use the DDIMScheduler
>>> pipeline.scheduler = DDIMScheduler.from_config(
... pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
... )
>>> pipeline.to("cuda")
```
Finally, in your call to the pipeline, set `guidance_rescale` to prevent overexposure:
```py
prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
image = pipeline(prompt, guidance_rescale=0.7).images[0]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero_snr.png"/>
</div>
hf_public_repos/diffusers/docs/source/ko/using-diffusers/weighted_prompts.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Weighting prompts
[[open-in-colab]]
Text-guided diffusion models generate images based on a given text prompt. The text prompt can include multiple concepts that the model should generate, and it's often desirable to weight certain parts of the prompt more or less.

Diffusion models work by conditioning the cross attention layers of the diffusion model with contextualized text embeddings (see the [Stable Diffusion Guide](https://huggingface.co/docs/optimum-neuron/main/en/package_reference/modeling#stable-diffusion) for more information). Thus, a simple way to emphasize (or de-emphasize) certain parts of the prompt is to increase or reduce the scale of the text embedding vector that corresponds to the relevant part of the prompt. This is called "prompt weighting" and has been a highly demanded feature by the community (see the issue [here](https://github.com/huggingface/diffusers/issues/2431)).
## How to do prompt weighting in Diffusers
We believe the role of `diffusers` is to be a toolbox that provides essential features enabling other projects, such as [InvokeAI](https://github.com/invoke-ai/InvokeAI) or [diffuzers](https://github.com/abhishekkrthakur/diffuzers), to build powerful UIs. In order to support arbitrary ways of manipulating prompts, `diffusers` exposes a [prompt_embeds](https://huggingface.co/docs/diffusers/v0.14.0/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) argument in many pipelines such as [StableDiffusionPipeline](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline), allowing you to directly pass the "prompt-weighted"/scaled text embeddings to the pipeline.

The [Compel library](https://github.com/damian0815/compel) provides an easy way to emphasize or de-emphasize portions of the prompt. We strongly recommend using it instead of preparing the embeddings yourself.

Let's look at a simple example. Say we want to generate an image of `"a red cat playing with a ball"`:
```py
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

prompt = "a red cat playing with a ball"

generator = torch.Generator(device="cpu").manual_seed(33)

image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
image
```
This gives you the following image:

As you can see, there is no "ball" in the image. Let's emphasize this part! First, you should install the `compel` library:
```bash
pip install compel
```
and then create a `Compel` object:
```py
from compel import Compel
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
```
Now emphasize the word "ball" with the `"++"` syntax:
```py
prompt = "a red cat playing with a ball++"
```
Instead of passing this prompt directly to the pipeline, process it with `compel_proc`:
```py
prompt_embeds = compel_proc(prompt)
```
Now you can pass `prompt_embeds` directly to the pipeline:
```py
generator = torch.Generator(device="cpu").manual_seed(33)

image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```
The image now contains a "ball"!

Similarly, you can de-emphasize parts of the sentence by using the `--` suffix for words; give it a try, as sketched below!
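For instance, here is a minimal sketch reusing `pipe` and `compel_proc` from above; exactly how much weight each `+` or `-` adds or removes is defined by the Compel library:

```py
# "red--" down-weights the color while "ball++" keeps the earlier emphasis
prompt = "a red-- cat playing with a ball++"
prompt_embeds = compel_proc(prompt)

generator = torch.Generator(device="cpu").manual_seed(33)
image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
```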
If your favorite pipeline does not have a `prompt_embeds` input, please make sure to open an issue; the Diffusers team tries to be as responsive as possible.
Compel 1.1.6 adds a utility class to simplify using textual inversions. Instantiate a `DiffusersTextualInversionManager` and pass it to the Compel init:
```py
from compel import Compel, DiffusersTextualInversionManager

textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager,
)
```
Also, please check out the documentation of the [compel](https://github.com/damian0815/compel) library for more information.
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/contribute_pipeline.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# How to contribute a community pipeline
<Tip>
💡 Take a look at GitHub Issue [#841](https://github.com/huggingface/diffusers/issues/841) for more context about why we're adding community pipelines to help everyone easily share their work without being slowed down.
</Tip>
Community pipelines allow you to add any additional features you'd like on top of the [`DiffusionPipeline`]. The main benefit of building on top of the `DiffusionPipeline` is anyone can load and use your pipeline by only adding one more argument, making it super easy for the community to access.

This guide will show you how to create a community pipeline and explain how they work. To keep things simple, you'll create a "one-step" pipeline where the `UNet` does a single forward pass and calls the scheduler once.
## Initialize the pipeline
Start by creating a `one_step_unet.py` file for your community pipeline. In this file, create a pipeline class that inherits from [`DiffusionPipeline`] to be able to load model weights and the scheduler configuration from the Hub. The one-step pipeline needs a `UNet` and a scheduler, so you'll need to add these as arguments to the `__init__` function:
```python
from diffusers import DiffusionPipeline
import torch


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
```
To ensure your pipeline and its components (`unet` and `scheduler`) can be saved with [`~DiffusionPipeline.save_pretrained`], add them to the `register_modules` function:
```diff
  from diffusers import DiffusionPipeline
  import torch


  class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
      def __init__(self, unet, scheduler):
          super().__init__()

+         self.register_modules(unet=unet, scheduler=scheduler)
```
Cool, the `__init__` step is done and you can move to the forward pass now! 🔥

## Define the forward pass

In the forward pass, which we recommend defining as `__call__`, you have complete creative freedom to add whatever feature you'd like. For our amazing one-step pipeline, create a random image and only call the `unet` and `scheduler` once by setting `timestep=1`:
```diff
  from diffusers import DiffusionPipeline
  import torch


  class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
      def __init__(self, unet, scheduler):
          super().__init__()

          self.register_modules(unet=unet, scheduler=scheduler)

+     def __call__(self):
+         image = torch.randn(
+             (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
+         )
+         timestep = 1

+         model_output = self.unet(image, timestep).sample
+         scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

+         return scheduler_output
```
That's it! 🚀 You can now run this pipeline by passing a `unet` and a `scheduler` to it:
```python
from diffusers import DDPMScheduler, UNet2DModel
scheduler = DDPMScheduler()
unet = UNet2DModel()
pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
output = pipeline()
```
But what's even better is that you can load pre-existing weights into the pipeline if the pipeline structure is identical. For example, you can load the [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32) weights into the one-step pipeline:
```python
pipeline = UnetSchedulerOneForwardPipeline.from_pretrained("google/ddpm-cifar10-32")
output = pipeline()
```
## Share your pipeline

Open a Pull Request on the 🧨 Diffusers [repository](https://github.com/huggingface/diffusers) to add your awesome pipeline in `one_step_unet.py` to the [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) subfolder.

Once it is merged, anyone with `diffusers >= 0.4.0` installed can use this pipeline magically 🪄 by specifying it in the `custom_pipeline` argument:
```python
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
pipe()
```
Another way to share your community pipeline is to upload the `one_step_unet.py` file directly to your preferred [model repository](https://huggingface.co/docs/hub/models-uploading) on the Hub. Instead of specifying the `one_step_unet.py` file, pass the model repository id to the `custom_pipeline` argument:
```python
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="stevhliu/one_step_unet")
```
Take a look at the following table to compare the two sharing workflows and help you decide the best option for you:

| | GitHub community pipeline | HF Hub community pipeline |
|----------------|---------------------------|---------------------------|
| Usage | Same | Same |
| Review process | Open a Pull Request on GitHub and undergo a review process from the Diffusers team before merging; may be slower | Upload directly to a Hub repository without any review; this is the fastest workflow |
| Visibility | Included in the official Diffusers repository and documentation | Included on your HF Hub profile and relies on your own usage/promotion to gain visibility |
<Tip>
💡 You can use whatever package you want in your community pipeline file - as long as the user has it installed, everything will work fine. Make sure you have one and only one pipeline class that inherits from `DiffusionPipeline` because this is automatically detected.
</Tip>
## How do community pipelines work?
A community pipeline is a class that inherits from [`DiffusionPipeline`] which:

- can be loaded with the [`custom_pipeline`] argument.
- has its model weights and scheduler configuration loaded from [`pretrained_model_name_or_path`].
- implements its features in a `pipeline.py` file.
Sometimes you can't load all the pipeline components' weights from an official repository. In this case, the other components should be passed directly to the pipeline:
```python
import torch
from diffusers import DiffusionPipeline, DDIMScheduler
from transformers import CLIPFeatureExtractor, CLIPModel

model_id = "CompVis/stable-diffusion-v1-4"
clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    scheduler=scheduler,
    torch_dtype=torch.float16,
)
```
The magic behind community pipelines is contained in the following code. It allows the community pipeline to be loaded from GitHub or the Hub, and it'll be available to all 🧨 Diffusers packages.
```python
# 2. Load the pipeline class, if using custom module then load it from the Hub
# if we load from explicit class, let's use it
if custom_pipeline is not None:
    pipeline_class = get_class_from_dynamic_module(
        custom_pipeline, module_file=CUSTOM_PIPELINE_FILE_NAME, cache_dir=custom_pipeline
    )
elif cls != DiffusionPipeline:
    pipeline_class = cls
else:
    diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
    pipeline_class = getattr(diffusers_module, config_dict["_class_name"])
```
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/depth2img.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Text-guided depth-to-image generation
[[open-in-colab]]
The [`StableDiffusionDepth2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images. In addition, you can also pass a `depth_map` to preserve the image structure. If no `depth_map` is provided, the pipeline automatically predicts the depth via an integrated [depth-estimation model](https://github.com/isl-org/MiDaS).

Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]:
```python
import torch
import requests
from PIL import Image

from diffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16,
).to("cuda")
```
Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to prevent certain words from guiding how an image is generated:
```python
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = Image.open(requests.get(url, stream=True).raw)
prompt = "two tigers"
n_prompt = "bad, deformed, ugly, bad anatomy"
image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0]
image
```
| Input | Output |
|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/coco-cats.png" width="500"/> | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/depth2img-tigers.png" width="500"/> |
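If you already have a depth estimate, you can pass it explicitly via the `depth_map` argument instead of relying on the built-in estimator. A minimal sketch, assuming `depth.png` is a hypothetical pre-computed grayscale depth image (the pipeline is expected to resize and normalize it internally):

```python
import numpy as np
import torch
from PIL import Image

# hypothetical pre-computed grayscale depth image
depth_image = Image.open("depth.png")

# shape (1, H, W); batch dimension first
depth_map = torch.from_numpy(np.array(depth_image)).float().unsqueeze(0)

image = pipe(prompt=prompt, image=init_image, depth_map=depth_map, negative_prompt=n_prompt, strength=0.7).images[0]
```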
Play around with the Spaces below and see if you notice a difference between generated images with and without a depth map!
<iframe
src="https://radames-stable-diffusion-depth2img.hf.space"
frameborder="0"
width="850"
height="500"
></iframe>
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/conditional_image_generation.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Conditional image generation
[[open-in-colab]]
Conditional image generation allows you to generate images from a text prompt. The text is converted into embeddings which are used to condition the model to generate an image from noise.

The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion system for inference.

Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) you would like to download. In this guide, you'll use [`DiffusionPipeline`] for text-to-image generation with [Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256):
```python
>>> from diffusers import DiffusionPipeline
>>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
```
The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU. You can move the generator object to a GPU, just like you would in PyTorch:
```python
>>> generator.to("cuda")
```
Now you can use the `generator` on your text prompt:
```python
>>> image = generator("An image of a squirrel in Picasso style").images[0]
```
The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. You can save the image by calling:
```python
>>> image.save("image_of_squirrel_painting.png")
```
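You can also steer how strongly the prompt guides generation with the `guidance_scale` argument of the pipeline call. A minimal sketch, assuming this latent diffusion pipeline accepts the argument (higher values follow the prompt more closely at some cost to diversity):

```python
>>> # higher guidance_scale follows the prompt more closely
>>> image = generator("An image of a squirrel in Picasso style", guidance_scale=7.5).images[0]
>>> image.save("image_of_squirrel_painting_guided.png")
```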
Try out the Spaces below, and feel free to play around with the guidance scale parameter to see how it affects the image quality!
<iframe
src="https://stabilityai-stable-diffusion.hf.space"
frameborder="0"
width="850"
height="500"
></iframe>
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/controlling_generation.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Controlled generation
Controlling outputs generated by diffusion models has been long pursued by the community and is now an active research topic. In many popular diffusion models, subtle changes in inputs, both images and text prompts, can drastically change outputs. In an ideal world, we want to be able to control how semantics are preserved and changed.

Most examples of preserving semantics reduce to being able to accurately map a change in input to a change in output. I.e. adding an adjective to a subject in a prompt preserves the entire image, only modifying the changed subject. Or, image variation of a particular subject preserves the subject's pose.

Additionally, there are qualities of generated images that we would like to influence beyond semantic preservation. I.e. in general, we would like our outputs to be of good quality, adhere to a particular style, or be realistic.

We document some of the techniques `diffusers` supports to control generation of diffusion models. Much is cutting edge research and can be quite nuanced. If something needs clarifying or you have a suggestion, don't hesitate to open a discussion on the [forum](https://discuss.huggingface.co/) or a [GitHub issue](https://github.com/huggingface/diffusers/issues).

We provide a high level explanation of how the generation can be controlled as well as a snippet of the technicals. For more in-depth explanations of the technicals, the original papers linked from the pipelines are always the best resources.

Depending on the use case, one should choose a technique accordingly. In many cases, these techniques can be combined. For example, one can combine Textual Inversion with SEGA to provide more semantic guidance to the outputs generated using Textual Inversion.

Unless otherwise mentioned, these are techniques that work with existing models and don't require their own weights.
1. [Instruct Pix2Pix](#instruct-pix2pix)
2. [Pix2Pix Zero](#pix2pixzero)
3. [Attend and Excite](#attend-and-excite)
4. [Semantic Guidance](#semantic-guidance)
5. [Self-attention Guidance](#self-attention-guidance)
6. [Depth2Image](#depth2image)
7. [MultiDiffusion Panorama](#multidiffusion-panorama)
8. [DreamBooth](#dreambooth)
9. [Textual Inversion](#textual-inversion)
10. [ControlNet](#controlnet)
11. [Prompt Weighting](#prompt-weighting)
12. [Custom Diffusion](#custom-diffusion)
13. [Model Editing](#model-editing)
14. [DiffEdit](#diffedit)
15. [T2I-Adapter](#t2i-adapter)
For convenience, we provide a table to denote which methods are inference-only and which require fine-tuning/training.
| **Method** | **Inference only** | **Requires training / fine-tuning** | **Comments** |
| :-: | :-: | :-: | :-: |
| [Instruct Pix2Pix](#instruct-pix2pix) | ✅ | ❌ | Can additionally be fine-tuned for better performance on specific edit instructions. |
| [Pix2Pix Zero](#pix2pixzero) | ✅ | ❌ | |
| [Attend and Excite](#attend-and-excite) | ✅ | ❌ | |
| [Semantic Guidance](#semantic-guidance) | ✅ | ❌ | |
| [Self-attention Guidance](#self-attention-guidance) | ✅ | ❌ | |
| [Depth2Image](#depth2image) | ✅ | ❌ | |
| [MultiDiffusion Panorama](#multidiffusion-panorama) | ✅ | ❌ | |
| [DreamBooth](#dreambooth) | ❌ | ✅ | |
| [Textual Inversion](#textual-inversion) | ❌ | ✅ | |
| [ControlNet](#controlnet) | ✅ | ❌ | A ControlNet can be trained/fine-tuned on a custom conditioning. |
| [Prompt Weighting](#prompt-weighting) | ✅ | ❌ | |
| [Custom Diffusion](#custom-diffusion) | ❌ | ✅ | |
| [Model Editing](#model-editing) | ✅ | ❌ | |
| [DiffEdit](#diffedit) | ✅ | ❌ | |
| [T2I-Adapter](#t2i-adapter) | ✅ | ❌ | |
## Instruct Pix2Pix
[Paper](https://arxiv.org/abs/2211.09800)
[Instruct Pix2Pix](../api/pipelines/stable_diffusion/pix2pix) is fine-tuned from Stable Diffusion to support editing input images. It takes as inputs an image and a prompt describing an edit, and it outputs the edited image. Instruct Pix2Pix has been explicitly trained to work well with [InstructGPT](https://openai.com/blog/instruction-following/)-like prompts.

See [here](../api/pipelines/stable_diffusion/pix2pix) for more information on how to use it; a hedged sketch follows.
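As a quick flavor of the API (a minimal sketch rather than the linked guide's exact example; `init_image` is an assumed `PIL.Image` you supply):

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

# `init_image` is an assumed PIL image you want to edit
image = pipe("make the sky stormy", image=init_image, num_inference_steps=20).images[0]
```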
## Pix2Pix Zero
[Paper](https://arxiv.org/abs/2302.03027)
[Pix2Pix Zero](../api/pipelines/stable_diffusion/pix2pix_zero) allows modifying an image so that one concept or subject is translated to another one while preserving general image semantics.

The denoising process is guided from one conceptual embedding towards another conceptual embedding. The intermediate latents are optimized during the denoising process to push the attention maps towards reference attention maps. The reference attention maps are from the denoising process of the input image and are used to encourage semantic preservation.

Pix2Pix Zero can be used both to edit synthetic images as well as real images.

- To edit synthetic images, one first generates an image given a caption.
  Next, we generate image captions for the concept that shall be edited and for the new target concept. We can use a model like [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) for this purpose. Then, "mean" prompt embeddings for both the source and target concepts are created via the text encoder. At last, the pix2pix-zero algorithm is used to edit the synthetic image.
- To edit a real image, one first generates an image caption using a model like [BLIP](https://huggingface.co/docs/transformers/model_doc/blip). Then one applies DDIM inversion on the prompt and image to generate "inverse" latents. Similar to before, "mean" prompt embeddings for both source and target concepts are created, and finally the pix2pix-zero algorithm in combination with the "inverse" latents is used to edit the image.
<Tip>
Pix2Pix Zero is the first model that allows "zero-shot" image editing. This means the model can edit an image in less than a minute on a consumer GPU, as shown [here](../api/pipelines/stable_diffusion/pix2pix_zero#usage-example).
</Tip>
As mentioned above, Pix2Pix Zero includes optimizing the latents (and not any of the UNet, VAE, or the text encoder) to steer the generation toward a particular concept. This means that the overall pipeline might require more memory than a standard [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img).

See [here](../api/pipelines/stable_diffusion/pix2pix_zero) for more information on how to use it.
## Attend and Excite
[Paper](https://arxiv.org/abs/2301.13826)
[Attend and Excite](../api/pipelines/stable_diffusion/attend_and_excite) allows subjects in the prompt to be faithfully represented in the final image.

A set of token indices are given as input, corresponding to the subjects in the prompt that need to be present in the image. During denoising, each token index is guaranteed to have a minimum attention threshold for at least one patch of the image. The intermediate latents are iteratively optimized during the denoising process to strengthen the attention of the most neglected subject token until the attention threshold is passed for all subject tokens.

Like Pix2Pix Zero, Attend and Excite also involves a mini optimization loop in its pipeline (leaving the pre-trained weights untouched) and can require more memory than the usual `StableDiffusionPipeline`.

See [here](../api/pipelines/stable_diffusion/attend_and_excite) for more information on how to use it.
## Semantic Guidance (SEGA)
[Paper](https://arxiv.org/abs/2301.12247)
Semantic Guidance (SEGA) allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait.

Similar to how classifier-free guidance provides guidance via empty prompt inputs, SEGA provides guidance on conceptual prompts. Multiple of these conceptual prompts can be applied simultaneously. Each conceptual prompt can either add or remove its concept depending on whether the guidance is applied positively or negatively.

Unlike Pix2Pix Zero or Attend and Excite, SEGA directly interacts with the diffusion process instead of performing any explicit gradient-based optimization.

See [here](../api/pipelines/semantic_stable_diffusion) for more information on how to use it.
## Self-attention Guidance (SAG)
[Paper](https://arxiv.org/abs/2210.00939)
[Self-attention Guidance](../api/pipelines/stable_diffusion/self_attention_guidance) improves the general quality of images.

SAG provides guidance from predictions not conditioned on high-frequency details to fully conditioned images. The high-frequency details are extracted out of the UNet self-attention maps.

See [here](../api/pipelines/stable_diffusion/self_attention_guidance) for more information on how to use it; a hedged sketch follows.
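A minimal sketch of the corresponding pipeline; the checkpoint id is illustrative, and `sag_scale` is the knob that enables the technique:

```py
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# sag_scale > 0 adds self-attention guidance on top of the usual guidance
image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75).images[0]
```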
## Depth2Image
[Project](https://huggingface.co/stabilityai/stable-diffusion-2-depth)
[Depth2Image](../pipelines/stable_diffusion_2#depthtoimage) is fine-tuned from Stable Diffusion to better preserve semantics for text-guided image variation.

It conditions on a monocular depth estimate of the original image.

See [here](../api/pipelines/stable_diffusion_2#depthtoimage) for more information on how to use it.
<Tip>
An important difference between methods like InstructPix2Pix and Pix2Pix Zero is that the former involves fine-tuning the pre-trained weights while the latter does not. This means that you can apply Pix2Pix Zero to any of the available Stable Diffusion models.
</Tip>
## MultiDiffusion Panorama
[Paper](https://arxiv.org/abs/2302.08113)
MultiDiffusion defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes.

[MultiDiffusion Panorama](../api/pipelines/stable_diffusion/panorama) allows generating high-quality images at arbitrary aspect ratios (e.g., panoramas).

See [here](../api/pipelines/stable_diffusion/panorama) for more information on how to use it to generate panoramic images; a hedged sketch follows.
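A minimal sketch (the checkpoint id and `width` value are illustrative):

```py
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

# a wide `width` makes the pipeline stitch overlapping diffusion windows into a panorama
image = pipe("a photo of the dolomites", width=2048).images[0]
```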
## Fine-tuning your own models

In addition to pre-trained models, Diffusers has training scripts for fine-tuning models on user-provided data.
## DreamBooth
[DreamBooth](../training/dreambooth) fine-tunes a model to teach it about a new subject. I.e. a few pictures of a person can be used to generate images of that person in different styles.

See [here](../training/dreambooth) for more information on how to use it.
## Textual Inversion
[Textual Inversion](../training/text_inversion) fine-tunes a model to teach it about a new concept. I.e. a few pictures of a style of artwork can be used to generate images in that style.

See [here](../training/text_inversion) for more information on how to use it.
## ControlNet
[Paper](https://arxiv.org/abs/2302.05543)
[ControlNet](../api/pipelines/stable_diffusion/controlnet) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained ControlNets trained on different conditionings such as edge detection, scribbles, depth maps, and semantic segmentations.

See [here](../api/pipelines/stable_diffusion/controlnet) for more information on how to use it; a hedged sketch follows.
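A minimal sketch of plugging one of the canonical ControlNets into a pipeline; `canny_image` is an assumed pre-computed edge map (a `PIL.Image`):

```py
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# `canny_image` is an assumed PIL image of Canny edges guiding the layout
image = pipe("a futuristic city at night", image=canny_image).images[0]
```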
## Prompt Weighting
Prompt weighting is a simple technique that puts more attention weight on certain parts of the text input.

For a more in-detail explanation and examples, see [here](../using-diffusers/weighted_prompts).
## Custom Diffusion
[Custom Diffusion](../training/custom_diffusion) only fine-tunes the cross-attention maps of a pre-trained text-to-image diffusion model. It also allows for additionally performing textual inversion, and it supports multi-concept training by design. Like DreamBooth and Textual Inversion, Custom Diffusion is used to teach a pre-trained text-to-image diffusion model about new concepts so it can generate outputs involving the concept(s) of interest.

For more details, check out the [official docs](../training/custom_diffusion).
## Model Editing
[Paper](https://arxiv.org/abs/2303.08084)
The [text-to-image model editing pipeline](../api/pipelines/model_editing) helps you mitigate some of the incorrect implicit assumptions a pre-trained text-to-image diffusion model might make about the subjects present in the input prompt. For example, if you prompt Stable Diffusion for images of "A pack of roses", the roses in the generated images are more likely to be red. This pipeline helps you change that assumption.

For more details, check out the [official docs](../api/pipelines/model_editing).
## DiffEdit
[Paper](https://arxiv.org/abs/2210.11427)
[DiffEdit](../api/pipelines/diffedit) allows for semantic editing of input images along with input prompts while preserving the original input image as much as possible.

For more details, check out the [official docs](../api/pipelines/diffedit).
## T2I-Adapter
[Paper](https://arxiv.org/abs/2302.08453)
[T2I-Adapter](../api/pipelines/stable_diffusion/adapter) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained adapters trained on different conditionings such as edge detection, sketch, depth maps, and semantic segmentations.

See the [official docs](api/pipelines/stable_diffusion/adapter) for information on how to use it.
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/textual_inversion_inference.md
|
# Textual inversion
[[open-in-colab]]
The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer).

This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](./training/text_inversion) training guide.
Login to your Hugging Face account:
```py
from huggingface_hub import notebook_login
notebook_login()
```
Import the necessary libraries, and create a helper function `image_grid` to visualize the generated images:
```py
import os
import torch

import PIL
from PIL import Image

from diffusers import StableDiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer


def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```
Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer):
```py
pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5"
repo_id_embeds = "sd-concepts-library/cat-toy"
```
Now you can load a pipeline, and pass the pre-learned concept to it:
```py
pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16).to("cuda")
pipeline.load_textual_inversion(repo_id_embeds)
```
Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate:
```py
prompt = "a grafitti in a favela wall with a <cat-toy> on it"
num_samples = 2
num_rows = 2
```
Then run the pipeline, save the generated images, and visualize them with the helper function `image_grid` you created at the beginning. Feel free to play around with parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality.
```py
all_images = []
for _ in range(num_rows):
    images = pipeline(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=7.5).images
    all_images.extend(images)

grid = image_grid(all_images, num_samples, num_rows)
grid
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png">
</div>
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/img2img.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Text-guided image-to-image generation
[[open-in-colab]]
The [`StableDiffusionImg2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images.

Before you begin, make sure you have all the necessary libraries installed:
```bash
!pip install diffusers transformers ftfy accelerate
```
Get started by creating a [`StableDiffusionImg2ImgPipeline`] with a pretrained Stable Diffusion model like [`nitrosocke/Ghibli-Diffusion`](https://huggingface.co/nitrosocke/Ghibli-Diffusion).
```python
import torch
import requests
from PIL import Image
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline

device = "cuda"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16).to(
    device
)
```
Download and preprocess an initial image so you can pass it to the pipeline:
```python
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image.thumbnail((768, 768))
init_image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg"/>
</div>
<Tip>
💡 `strength` is a value between 0.0 and 1.0 that controls the amount of noise added to the input image. Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input.
</Tip>
Define the prompt (for this checkpoint finetuned on Ghibli-style art, you need to prefix the prompt with the `ghibli style` tokens) and run the pipeline:
```python
prompt = "ghibli style, a fantasy landscape with castles"
generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ghibli-castles.png"/>
</div>
You can also experiment with a different scheduler to see how that affects the output:
```python
from diffusers import LMSDiscreteScheduler
lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.scheduler = lms
generator = torch.Generator(device=device).manual_seed(1024)
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lms-ghibli.png"/>
</div>
Check out the Spaces below, and try generating images with different `strength` values. You'll notice that using lower values of `strength` produces images that are more similar to the original image. Feel free to also switch the scheduler to the [`LMSDiscreteScheduler`] and see how that affects the output; a small local sketch of a strength sweep follows the demo.
<iframe
src="https://stevhliu-ghibli-img2img.hf.space"
frameborder="0"
width="850"
height="500"
></iframe>
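To run a similar comparison locally, here is a minimal sketch that sweeps `strength` with a fixed seed (the output file names are illustrative):

```python
# sweep `strength` with a fixed seed so only the amount of added noise changes
for strength in (0.3, 0.5, 0.75):
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipe(prompt=prompt, image=init_image, strength=strength, guidance_scale=7.5, generator=generator).images[0]
    image.save(f"ghibli_strength_{strength}.png")
```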
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/loading_overview.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
🧨 Diffusers offers many pipelines, models, and schedulers for generative tasks. To make loading these components as simple as possible, we provide a single and unified method, `from_pretrained()`, that loads any of these components from either the Hugging Face [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) or your local machine. Whenever you load a pipeline or model, the latest files are automatically downloaded and cached so you can quickly reuse them next time without redownloading the files.
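For example, loading a complete pipeline from the Hub takes a single call (the checkpoint id here is illustrative):

```py
from diffusers import DiffusionPipeline

# downloads the checkpoint on first use, then reuses the local cache
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```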
This section covers everything you need to know about loading pipelines, how to load different components in a pipeline, how to load checkpoint variants, and how to load community pipelines. You'll also learn how to load schedulers and compare the speed and quality trade-offs of using different schedulers. Finally, you'll see how to convert and load KerasCV checkpoints so you can use them in PyTorch with 🧨 Diffusers.
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/other-formats.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Load different Stable Diffusion formats

Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers allows you to use all the features supported by the library, such as [using different schedulers](schedulers) for inference, building your custom pipeline, and a variety of techniques and methods for optimizing inference speed.
<Tip>
We highly recommend using the `.safetensors` format because it is more secure than traditional pickled files, which are vulnerable and can be exploited to execute any code on your machine (learn more in the safetensors loading guide).
</Tip>
This guide will show you how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers.
## PyTorch .ckpt
The checkpoint - or `.ckpt` - format is commonly used to store and save models. A `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_ckpt`] method (see the sketch below), it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available.
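As a minimal sketch of the direct route (the local path is hypothetical):

```py
from diffusers import StableDiffusionPipeline

# `./model.ckpt` is a hypothetical local checkpoint path
pipeline = StableDiffusionPipeline.from_ckpt("./model.ckpt")
```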
There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint, or convert the `.ckpt` file with a script.
### Convert with a Space

The easiest and most convenient way to convert a `.ckpt` file is to use the SD to Diffusers Space. You can follow the instructions on the Space to convert the `.ckpt` file.

This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or an error. In this case, you can try converting the `.ckpt` file with a script.
### Convert with a script

🤗 Diffusers provides a conversion script for converting `.ckpt` files. This approach is more reliable than the Space above.

Before you start, make sure you have a local clone of 🤗 Diffusers to run the script, and log in to your Hugging Face account so you can open pull requests and push your converted model to the Hub.
```bash
huggingface-cli login
```
To use the script:

1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this TemporalNet `.ckpt` file:
```bash
git lfs install
git clone https://huggingface.co/CiaraRowles/TemporalNet
```
2. Open a pull request on the repository where you're converting the checkpoint from:
```bash
cd TemporalNet && git fetch origin refs/pr/13:pr/13
git checkout pr/13
```
3. There are several input arguments to configure in the conversion script, but the most important ones are:

   - `checkpoint_path`: the path to the `.ckpt` file to convert.
   - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file.
   - `dump_path`: the path to the converted model.

   For example, you can take the `cldm_v15.yaml` file from the ControlNet repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model.
4. Now you can run the script to convert the `.ckpt` file:
```bash
python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet
```
5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)!
```bash
git push origin pr/13:refs/pr/13
```
## **Keras .pb or .h5**

🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are supported by the Convert KerasCV Space at the moment.

[KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment, whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).

The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so they are ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub.

For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint, which was trained with textual inversion. It uses the special token `<my-funny-cat>` to personalize images with cats.
The Convert KerasCV Space allows you to input the following:

- Your Hugging Face token.
- Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and the text encoder. For example, textual inversion only requires the embeddings from the text encoder, and a text-to-image model only requires the UNet weights.
- Placeholder token, which is only applicable for textual inversion models.
- The `output_repo_prefix`, the name of the repository where the converted model is stored.
Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model.

If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the code snippet:
```py
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
```
Then you can generate an image like:
```py
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
pipeline.to("cuda")
placeholder_token = "<my-funny-cat-token>"
prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
image = pipeline(prompt, num_inference_steps=50).images[0]
```
## **A1111 LoRA files**

[Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the Low-Rank Adaptation (LoRA) technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model.

🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]:
```py
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
```
Download a LoRA checkpoint from Civitai; this example uses the [Howls Moving Castle, Interior/Scenery LoRA (Ghibli Style)](https://civitai.com/models/14605?modelVersionId=19998) checkpoint, but feel free to try out any LoRA checkpoint!
```bash
!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
```
Load the LoRA checkpoint into the pipeline with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method:
```py
pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
```
Now you can use the pipeline to generate images:
```py
prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"

images = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=512,
    height=512,
    num_inference_steps=25,
    num_images_per_prompt=4,
    generator=torch.manual_seed(0),
).images
```
Finally, create a helper function to display the images:
```py
from PIL import Image


def image_grid(imgs, rows=2, cols=2):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


image_grid(images)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png" />
</div>
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Load community pipelines
[[open-in-colab]]
Community pipelines are any [`DiffusionPipeline`] class that differ from the original implementation as specified in their paper (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the ["Text-to-Image Generation with ControlNet Conditioning"](https://arxiv.org/abs/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline.

There are many cool community pipelines like [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) or [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community).

To load any community pipeline on the Hub, pass the repository id of the community pipeline to the `custom_pipeline` argument and the model repository where you'd like to load the pipeline weights and components from. For example, the example below loads a dummy pipeline from `hf-internal-testing/diffusers-dummy-pipeline` and the pipeline weights and components from `google/ddpm-cifar10-32`.
<Tip warning={true}>
🔒 Loading community pipelines from the Hugging Face Hub means you are trusting that the code you're loading is safe. Make sure to inspect the code online before loading and running it automatically!
</Tip>
```py
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
)
```
Loading an official community pipeline is similar, but in addition to loading the weights from an official repository id, you can also pass pipeline components directly. The example below loads the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline, and sets the `clip_model` and `feature_extractor` components it uses directly:
```py
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
clip_model = CLIPModel.from_pretrained(clip_model_id)

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
)
```
For more information about community pipelines, take a look at the [Community pipelines](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/custom_pipeline_examples) guide, and if you're interested in contributing a community pipeline, check out the [How to contribute a community pipeline](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/contribute_pipeline) guide!
| 0
|
hf_public_repos/diffusers/docs/source/ko
|
hf_public_repos/diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Understanding pipelines, models and schedulers
[[open-in-colab]]
🧨 Diffusers is designed to be a user-friendly and flexible toolbox for building diffusion systems tailored to your use-case. At the core of the toolbox are models and schedulers. While the [`DiffusionPipeline`] bundles these components together for convenience, you can also unbundle the pipeline and use the models and schedulers separately to create new diffusion systems.

In this tutorial, you'll learn how to use models and schedulers to assemble a diffusion system for inference, starting with a basic pipeline and then progressing to the Stable Diffusion pipeline.
## Deconstruct a basic pipeline

A pipeline is a quick and easy way to run a model for inference, requiring no more than four lines of code to generate an image:
```py
>>> from diffusers import DDPMPipeline
>>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")
>>> image = ddpm(num_inference_steps=25).images[0]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/>
</div>
That was super easy, but how did the pipeline do that? Let's break down the pipeline and take a look at what's happening under the hood.

In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps.

To recreate the pipeline with the model and scheduler separately, let's write our own denoising process.

1. Load the model and scheduler:
```py
>>> from diffusers import DDPMScheduler, UNet2DModel
>>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
>>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
```
2. Set the number of timesteps to run the denoising process for:
```py
>>> scheduler.set_timesteps(50)
```
3. Setting the scheduler timesteps creates a tensor with evenly spaced elements in it, 50 in this example. Each element corresponds to a timestep at which the model denoises an image. When you create the denoising loop later, you'll iterate over this tensor to denoise an image:
```py
>>> scheduler.timesteps
tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720,
700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440,
420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160,
140, 120, 100, 80, 60, 40, 20, 0])
```
4. Create some random noise with the same shape as the desired output:
```py
>>> import torch
>>> sample_size = model.config.sample_size
>>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
```
5. Now write a loop to iterate over the timesteps. At each timestep, the model does a [`UNet2DModel.forward`] pass and returns the noisy residual. The scheduler's [`~DDPMScheduler.step`] method takes the noisy residual, timestep, and input, and it predicts the image at the previous timestep. This output becomes the next input to the model in the denoising loop, and it repeats until it reaches the end of the `timesteps` array.
```py
>>> input = noise

>>> for t in scheduler.timesteps:
...     with torch.no_grad():
...         noisy_residual = model(input, t).sample
...     previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
...     input = previous_noisy_sample
```
This is the entire denoising process, and you can use this same pattern to write any diffusion system.

6. The last step is to convert the denoised output into an image:
```py
>>> from PIL import Image
>>> import numpy as np
>>> image = (input / 2 + 0.5).clamp(0, 1)
>>> image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
>>> image = Image.fromarray((image * 255).round().astype("uint8"))
>>> image
```
In the next section, you'll put your skills to the test and break down the more complex Stable Diffusion pipeline. The steps are more or less the same: initialize the necessary components and set the number of timesteps to create a `timestep` array. The `timestep` array is used in the denoising loop, and for each element in this array, the model predicts a less noisy image. The denoising loop iterates over the `timestep`s, and at each timestep it outputs a noise residual which the scheduler uses to predict a less noisy image at the previous timestep. This process is repeated until you reach the end of the `timestep` array.

Let's try it out!
## Deconstruct the Stable Diffusion pipeline

Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder converts the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler.

As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models.
<Tip>
💡 Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog for more details about how the VAE, UNet, and text encoder models work.
</Tip>
Now that you know what you need for the Stable Diffusion pipeline, load all these components with the [`~ModelMixin.from_pretrained`] method. You can find them in the pretrained [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint, and each component is stored in a separate subfolder:
```py
>>> from PIL import Image
>>> import torch
>>> from transformers import CLIPTextModel, CLIPTokenizer
>>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
>>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
>>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer")
>>> text_encoder = CLIPTextModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="text_encoder")
>>> unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
```
Instead of the default [`PNDMScheduler`], exchange it for the [`UniPCMultistepScheduler`] to see how easy it is to plug a different scheduler in:
```py
>>> from diffusers import UniPCMultistepScheduler
>>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```
To speed up inference, move the models to a GPU since, unlike the scheduler, they have trainable weights:
```py
>>> torch_device = "cuda"
>>> vae.to(torch_device)
>>> text_encoder.to(torch_device)
>>> unet.to(torch_device)
```
### Create text embeddings

The next step is to tokenize the text to generate embeddings. The text is used to condition the UNet model and steer the diffusion process towards something that resembles the input prompt.
<Tip>
๐ก `guidance_scale` ๋งค๊ฐ๋ณ์๋ ์ด๋ฏธ์ง๋ฅผ ์์ฑํ ๋ ํ๋กฌํํธ์ ์ผ๋ง๋ ๋ง์ ๊ฐ์ค์น๋ฅผ ๋ถ์ฌํ ์ง ๊ฒฐ์ ํฉ๋๋ค.
</Tip>
Feel free to choose any prompt you like if you want to generate something else!
```py
>>> prompt = ["a photograph of an astronaut riding a horse"]
>>> height = 512  # default height of Stable Diffusion
>>> width = 512  # default width of Stable Diffusion
>>> num_inference_steps = 25  # number of denoising steps
>>> guidance_scale = 7.5  # scale for classifier-free guidance
>>> generator = torch.manual_seed(0)  # seed generator to create the initial latent noise
>>> batch_size = len(prompt)
```
Tokenize the text and generate the embeddings from the prompt:
```py
>>> text_input = tokenizer(
... prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
... )
>>> with torch.no_grad():
... text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
```
You'll also need to generate the *unconditional text embeddings*, which are the embeddings for the padding token. These need to have the same shape (`batch_size` and `seq_length`) as the conditional `text_embeddings`:
```py
>>> max_length = text_input.input_ids.shape[-1]
>>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt")
>>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
```
Let's concatenate the conditional and unconditional embeddings into a batch to avoid doing two forward passes:
```py
>>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
```
### Create random noise

Next, generate some initial random noise as a starting point for the diffusion process. This is the latent representation of the image, and it'll be gradually denoised. At this point, the `latent` image is smaller than the final image size, but that's okay because the model will transform it into the final 512x512 image dimensions later.
<Tip>
💡 The height and width are divided by 8 because the `vae` model has 3 down-sampling layers. You can verify this by running the following:
```py
2 ** (len(vae.config.block_out_channels) - 1) == 8
```
</Tip>
```py
>>> latents = torch.randn(
... (batch_size, unet.in_channels, height // 8, width // 8),
... generator=generator,
... device=torch_device,
... )
```
### Denoise the image

Start by scaling the input with the initial noise distribution, *sigma*, the noise scale value, which is required for improved schedulers like [`UniPCMultistepScheduler`]:
```py
>>> latents = latents * scheduler.init_noise_sigma
```
The last step is to create the denoising loop that'll progressively transform the pure noise in `latents` to an image described by your prompt. Remember, the denoising loop needs to do three things:

1. Set the scheduler's timesteps to use during denoising.
2. Iterate over the timesteps.
3. At each timestep, call the UNet model to predict the noise residual and pass it to the scheduler to compute the previous noisy sample.
```py
>>> from tqdm.auto import tqdm
>>> scheduler.set_timesteps(num_inference_steps)
>>> for t in tqdm(scheduler.timesteps):
...     # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes
... latent_model_input = torch.cat([latents] * 2)
... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t)
...     # predict the noise residual
... with torch.no_grad():
... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
...     # perform guidance
... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
...     # compute the previous noisy sample x_t -> x_t-1
... latents = scheduler.step(noise_pred, t, latents).prev_sample
```
### Decode the image

The final step is to use the `vae` to decode the latent representation into an image and get the decoded output with `sample`:
```py
# scale and decode the image latents with the vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
image = vae.decode(latents).sample
```
Lastly, convert the image to a `PIL.Image` to see your generated image!
```py
>>> image = (image / 2 + 0.5).clamp(0, 1)
>>> image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
>>> images = (image * 255).round().astype("uint8")
>>> pil_images = [Image.fromarray(image) for image in images]
>>> pil_images[0]
```
<div class="flex justify-center">
<img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/>
</div>
## Next steps

From basic to complex pipelines, you've seen that all you really need to write your own diffusion system is a denoising loop. The loop should set the scheduler's timesteps, iterate over them, and alternate between calling the UNet model to predict the noise residual and passing it to the scheduler to compute the previous noisy sample.

This is exactly what 🧨 Diffusers is designed for: to make it intuitive and easy to write your own diffusion system using models and schedulers.

For your next steps, feel free to:

* Learn how to [build and contribute a pipeline](using-diffusers/#contribute_pipeline) to 🧨 Diffusers. We can't wait to see what you come up with!
* Explore [existing pipelines](./api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
# Text-guided image inpainting
[[open-in-colab]]
The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image by providing a mask and a text prompt. It uses a version of Stable Diffusion specifically trained for inpainting tasks, such as [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting).

Get started by loading an instance of the [`StableDiffusionInpaintPipeline`]:
```python
import PIL
import requests
import torch
from io import BytesIO
from diffusers import StableDiffusionInpaintPipeline
pipeline = StableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting",
torch_dtype=torch.float16,
)
pipeline = pipeline.to("cuda")
```
Download an image and a mask of a dog which you'll eventually replace:
```python
def download_image(url):
response = requests.get(url)
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
```
Now you can create a prompt to replace the mask with something else:
```python
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```
`image` | `mask_image` | `prompt` | output |
:-------------------------:|:-------------------------:|:-------------------------:|-------------------------:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |
<Tip warning={true}>
A previous, experimental implementation of inpainting used a different, lower-quality process. To ensure backwards compatibility, loading a pretrained pipeline that doesn't contain the new model will still apply the old inpainting method.
</Tip>
Check out the Space below to try out image inpainting for yourself!
<iframe
src="https://runwayml-stable-diffusion-inpainting.hf.space"
frameborder="0"
width="850"
height="500"
></iframe>
# Community pipelines

> **For more information about community pipelines, take a look at [this issue](https://github.com/huggingface/diffusers/issues/841).**

**Community** examples consist of both inference and training examples that have been added by the community.
Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste ready code example.
If a community pipeline doesn't work as expected, please open an issue and ping the author on it.
| Example | Description | Code Example | Colab | Author |
|:--|:--|:--|:--|--:|
| CLIP Guided Stable Diffusion | Doing CLIP guidance for text-to-image generation with Stable Diffusion | [CLIP Guided Stable Diffusion](#clip-guided-stable-diffusion) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/CLIP_Guided_Stable_diffusion_with_diffusers.ipynb) | [Suraj Patil](https://github.com/patil-suraj/) |
| One Step U-Net (Dummy) | Example showing how to use community pipelines (see https://github.com/huggingface/diffusers/issues/841) | [One Step U-Net](#one-step-unet) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
| Stable Diffusion Interpolation | Interpolate the latent space of Stable Diffusion between different prompts/seeds | [Stable Diffusion Interpolation](#stable-diffusion-interpolation) | - | [Nate Raw](https://github.com/nateraw/) |
| Stable Diffusion Mega | **One** Stable Diffusion pipeline with all the functionality of [Text2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), [Image2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) and [Inpainting](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | [Stable Diffusion Mega](#stable-diffusion-mega) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
| Long Prompt Weighting Stable Diffusion | **One** Stable Diffusion pipeline without token length limits, with support for parsing weights in prompts | [Long Prompt Weighting Stable Diffusion](#long-prompt-weighting-stable-diffusion) | - | [SkyTNT](https://github.com/SkyTNT) |
| Speech to Image | Generate text with automatic speech recognition and use Stable Diffusion to generate an image from it | [Speech to Image](#speech-to-image) | - | [Mikail Duzenli](https://github.com/MikailINTech) |
To load a custom pipeline, you just need to pass the `custom_pipeline` argument to `DiffusionPipeline`, as one of the files in `diffusers/examples/community`. Feel free to send a PR with your own pipeline; we will merge it quickly.
```py
pipe = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", custom_pipeline="filename_in_the_community_folder"
)
```
## Example usages
### CLIP Guided Stable Diffusion

CLIP guided Stable Diffusion can help generate more realistic images by guiding Stable Diffusion at every denoising step with an additional CLIP model.

The following code requires roughly 12GB of GPU RAM.
```python
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel
import torch
feature_extractor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
guided_pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
custom_pipeline="clip_guided_stable_diffusion",
clip_model=clip_model,
feature_extractor=feature_extractor,
torch_dtype=torch.float16,
)
guided_pipeline.enable_attention_slicing()
guided_pipeline = guided_pipeline.to("cuda")
prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
generator = torch.Generator(device="cuda").manual_seed(0)
images = []
for i in range(4):
image = guided_pipeline(
prompt,
num_inference_steps=50,
guidance_scale=7.5,
clip_guidance_scale=100,
num_cutouts=4,
use_cutouts=False,
generator=generator,
).images[0]
images.append(image)
# save the images locally
for i, img in enumerate(images):
img.save(f"./clip_guided_sd/image_{i}.png")
```
The `images` list contains a list of PIL images that can be saved locally or displayed directly in Google Colab. Generated images tend to be of higher quality than those generated natively with Stable Diffusion.
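To eyeball the four results together, a small helper like the following can tile them into a single grid (a sketch; `make_grid` is just a local convenience function, not part of the pipeline):

```python
from PIL import Image


def make_grid(images, rows=2, cols=2):
    # Paste each PIL image onto one large canvas, row by row.
    w, h = images[0].size
    grid = Image.new("RGB", (cols * w, rows * h))
    for i, img in enumerate(images):
        grid.paste(img, ((i % cols) * w, (i // cols) * h))
    return grid


make_grid(images).save("./clip_guided_sd/grid.png")
```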
### One Step Unet
์์ "one-step-unet"๋ ๋ค์๊ณผ ๊ฐ์ด ์คํํ ์ ์์ต๋๋ค.
```python
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
pipe()
```
**Note**: This community pipeline is not useful as a feature, but rather just serves as an example of how community pipelines can be added (see https://github.com/huggingface/diffusers/issues/841).
### Stable Diffusion Interpolation
The following code can be run on a GPU with at least 8GB of VRAM and takes approximately 5 minutes.
```python
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
torch_dtype=torch.float16,
safety_checker=None, # Very important for videos...lots of false positives while interpolating
custom_pipeline="interpolate_stable_diffusion",
).to("cuda")
pipe.enable_attention_slicing()
frame_filepaths = pipe.walk(
prompts=["a dog", "a cat", "a horse"],
seeds=[42, 1337, 1234],
num_interpolation_steps=16,
output_dir="./dreams",
batch_size=4,
height=512,
width=512,
guidance_scale=8.5,
num_inference_steps=50,
)
```
The output of the `walk(...)` function returns a list of images saved under the folder defined in `output_dir`. You can use these images to create videos of Stable Diffusion.

> Please have a look at https://github.com/nateraw/stable-diffusion-videos for more in-depth information on how to create videos using Stable Diffusion as well as more features.
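For a quick local preview, one possible way to stitch the saved frames into a clip is sketched below. It assumes the optional `imageio` package with its ffmpeg backend is installed, and reuses the `frame_filepaths` list returned above:

```python
import imageio.v2 as imageio

# Read the interpolation frames back in order and encode them as an mp4.
frames = [imageio.imread(path) for path in frame_filepaths]
imageio.mimsave("./dreams/walk.mp4", frames, fps=8)
```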
### Stable Diffusion Mega
The Stable Diffusion Mega pipeline lets you use the main use cases of the Stable Diffusion pipeline in a single class.
```python
#!/usr/bin/env python3
from diffusers import DiffusionPipeline
import PIL
import requests
from io import BytesIO
import torch
def download_image(url):
response = requests.get(url)
return PIL.Image.open(BytesIO(response.content)).convert("RGB")
pipe = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
custom_pipeline="stable_diffusion_mega",
torch_dtype=torch.float16,
)
pipe.to("cuda")
pipe.enable_attention_slicing()
### Text-to-Image
images = pipe.text2img("An astronaut riding a horse").images
### Image-to-Image
init_image = download_image(
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
prompt = "A fantasy landscape, trending on artstation"
images = pipe.img2img(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
### Inpainting
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
prompt = "a cat sitting on a bench"
images = pipe.inpaint(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75).images
```
As shown above, you can run text-to-image, image-to-image, and inpainting all with one pipeline.
### Long Prompt Weighting Stable Diffusion
This pipeline lets you input prompts without the 77-token length limit. You can also increase a word's weight with "()" or reduce it with "[]".
It also lets you use the main use cases of the Stable Diffusion pipeline in a single class.
#### pytorch
```python
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"hakurei/waifu-diffusion", custom_pipeline="lpw_stable_diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
prompt = "best_quality (1girl:1.3) bow bride brown_hair closed_mouth frilled_bow frilled_hair_tubes frills (full_body:1.3) fox_ear hair_bow hair_tubes happy hood japanese_clothes kimono long_sleeves red_bow smile solo tabi uchikake white_kimono wide_sleeves cherry_blossoms"
neg_prompt = "lowres, bad_anatomy, error_body, error_hair, error_arm, error_hands, bad_hands, error_fingers, bad_fingers, missing_fingers, error_legs, bad_legs, multiple_legs, missing_legs, error_lighting, error_shadow, error_reflection, text, error, extra_digit, fewer_digits, cropped, worst_quality, low_quality, normal_quality, jpeg_artifacts, signature, watermark, username, blurry"
pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0]
```
#### onnxruntime
```python
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
custom_pipeline="lpw_stable_diffusion_onnx",
revision="onnx",
provider="CUDAExecutionProvider",
)
prompt = "a photo of an astronaut riding a horse on mars, best quality"
neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
pipe.text2img(prompt, negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0]
```
If the token index sequence is longer than the maximum sequence length specified for this model (*** > 77), you may see a warning that `Running this sequence through the model will result in indexing errors`. This is normal behavior; don't worry about it.
### Speech to Image
The following code can generate an image from an audio sample using the pretrained OpenAI whisper-small model and Stable Diffusion.
```Python
import torch
import matplotlib.pyplot as plt
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import (
WhisperForConditionalGeneration,
WhisperProcessor,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = ds[3]
text = audio_sample["text"].lower()
speech_data = audio_sample["audio"]["array"]
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
diffuser_pipeline = DiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
custom_pipeline="speech_to_image_diffusion",
speech_model=model,
speech_processor=processor,
torch_dtype=torch.float16,
)
diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)
output = diffuser_pipeline(speech_data)
plt.imshow(output.images[0])
```
The above example produces the following image:

# Schedulers

A diffusion pipeline is made up of components such as a diffusion model and a scheduler, and it's also possible to customize a pipeline by swapping some of its components for others. The most representative example of this kind of customization is replacing the [scheduler](../api/schedulers/overview.md).
Schedulers define the overall denoising process of a diffusion system:

- How many denoising steps should be taken?
- Stochastic or deterministic?
- What algorithm should be used to find the denoised sample?
These questions can be quite subtle, and often involve a trade-off between denoising speed and denoising quality. Quantitatively judging which scheduler works best for a given pipeline is very difficult, so it's usually recommended to simply try a scheduler, look at the generated images with your own eyes, and judge its performance qualitatively.
## Loading a pipeline

Let's start by loading a Stable Diffusion pipeline. Remember that to use Stable Diffusion, you have to be a registered user on the Hugging Face Hub and have agreed to the relevant [license](https://huggingface.co/runwayml/stable-diffusion-v1-5).

*Translator's note: newly created Hugging Face accounts no longer appear to require agreeing to the license!*
```python
from huggingface_hub import login
from diffusers import DiffusionPipeline
import torch
# first we need to login with our access token
login()
# Now we can download the pipeline
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```
Next, move it to a GPU:
```python
pipeline.to("cuda")
```
## Accessing the scheduler

The scheduler is always one of the pipeline's components, and it's usually exposed as a property named `scheduler` on the pipeline instance:
```python
pipeline.scheduler
```
**Output**:
```
PNDMScheduler {
"_class_name": "PNDMScheduler",
"_diffusers_version": "0.8.0.dev0",
"beta_end": 0.012,
"beta_schedule": "scaled_linear",
"beta_start": 0.00085,
"clip_sample": false,
"num_train_timesteps": 1000,
"set_alpha_to_one": false,
"skip_prk_steps": true,
"steps_offset": 1,
"trained_betas": null
}
```
From the output, we can see that the scheduler is an instance of [`PNDMScheduler`]. Now let's compare the performance of [`PNDMScheduler`] with other schedulers. First, define a prompt to test with:
```python
prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition."
```
Next, fix the random seed to make sure similar images are generated:
```python
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_pndm.png" width="400"/>
<br>
</p>
## Changing the scheduler

Next, let's look at how to swap the pipeline's scheduler for another one. Every scheduler has a [`SchedulerMixin.compatibles`] property that holds information about the **compatible** schedulers:
```python
pipeline.scheduler.compatibles
```
**Output**:
```
[diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
diffusers.schedulers.scheduling_ddim.DDIMScheduler,
diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
diffusers.schedulers.scheduling_pndm.PNDMScheduler,
diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler]
```
The compatible schedulers are:
- [`LMSDiscreteScheduler`],
- [`DDIMScheduler`],
- [`DPMSolverMultistepScheduler`],
- [`EulerDiscreteScheduler`],
- [`PNDMScheduler`],
- [`DDPMScheduler`],
- [`EulerAncestralDiscreteScheduler`].
Let's compare each of these schedulers using the prompt defined earlier.

To change the pipeline's scheduler, we'll use the [`ConfigMixin.config`] property together with the [`ConfigMixin.from_config`] method:
```python
pipeline.scheduler.config
```
**Output**:
```
FrozenDict([('num_train_timesteps', 1000),
('beta_start', 0.00085),
('beta_end', 0.012),
('beta_schedule', 'scaled_linear'),
('trained_betas', None),
('skip_prk_steps', True),
('set_alpha_to_one', False),
('steps_offset', 1),
('_class_name', 'PNDMScheduler'),
('_diffusers_version', '0.8.0.dev0'),
('clip_sample', False)])
```
The config of the existing scheduler can also be ported to any other compatible scheduler.

The next example swaps the existing scheduler (`pipeline.scheduler`) for a different kind of scheduler (`DDIMScheduler`). Note how the config held by the existing scheduler is passed to the `.from_config` method:
```python
from diffusers import DDIMScheduler
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
```
Now run the pipeline and compare the quality of the images generated by the two schedulers:
```python
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_ddim.png" width="400"/>
<br>
</p>
## Comparing schedulers

So far we've run the [`PNDMScheduler`] and the [`DDIMScheduler`]. There are more schedulers left to compare, so let's keep going.

The [`LMSDiscreteScheduler`] generally produces better results:
```python
from diffusers import LMSDiscreteScheduler
pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_lms.png" width="400"/>
<br>
</p>
[`EulerDiscreteScheduler`] and [`EulerAncestralDiscreteScheduler`] can generate high quality images with as few as 30 inference steps:
```python
from diffusers import EulerDiscreteScheduler
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_discrete.png" width="400"/>
<br>
</p>
```python
from diffusers import EulerAncestralDiscreteScheduler
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_ancestral.png" width="400"/>
<br>
</p>
At the time of writing this document, the [`DPMSolverMultistepScheduler`] seems to deliver the best quality for the time spent; it can be run with as few as about 20 steps:
```python
from diffusers import DPMSolverMultistepScheduler
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
image
```
<p align="center">
<br>
<img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_dpm.png" width="400"/>
<br>
</p>
As you can see, the generated images are very similar and appear to be of comparable quality. Which scheduler to pick in practice often depends on the specific use case; in the end, the best choice is to run several schedulers yourself and compare the results with your own eyes.
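To make that visual comparison less repetitive, you can loop over the scheduler classes used above with the same seed and save one image per scheduler (a minimal sketch built only from the calls already shown in this section):

```python
import torch
from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
)

# Regenerate the same prompt/seed with each scheduler for a side-by-side look.
for scheduler_cls in [
    DDIMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
]:
    pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
    generator = torch.Generator(device="cuda").manual_seed(8)
    image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0]
    image.save(f"astronaut_{scheduler_cls.__name__}.png")
```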
## Changing the scheduler in Flax

If you're a JAX/Flax user, you can also change the default pipeline scheduler. The following is an example of running inference with the Flax Stable Diffusion pipeline and the super-fast [DPM-Solver++ scheduler](../api/schedulers/multistep_dpm_solver):
```Python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler
model_id = "runwayml/stable-diffusion-v1-5"
scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
model_id,
subfolder="scheduler"
)
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
model_id,
scheduler=scheduler,
revision="bf16",
dtype=jax.numpy.bfloat16,
)
params["scheduler"] = scheduler_state
# Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8)
prompt = "a photo of an astronaut riding a horse on mars"
num_samples = jax.device_count()
prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 25
# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
```
<Tip warning={true}>
The following Flax schedulers are *not yet* compatible with the Flax Stable Diffusion pipeline:
- `FlaxLMSDiscreteScheduler`
- `FlaxDDPMScheduler`
</Tip>
# Load safetensors

[safetensors](https://github.com/huggingface/safetensors) is a safe and fast file format for storing and loading tensors. Typically, PyTorch model weights are saved, or *pickled*, into a `.bin` file with Python's [`pickle`](https://docs.python.org/3/library/pickle.html) utility. However, pickle is not secure, and pickled files may contain malicious code that can be executed. safetensors is a secure alternative to pickle, making it ideal for sharing model weights.

This guide will show you how to load `.safetensors` files, and how to convert Stable Diffusion model weights stored in other formats to `.safetensors`. Before you start, make sure you have safetensors installed:
```bash
!pip install safetensors
```
If you look at the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) repository, you'll see the weights inside the `text_encoder`, `unet` and `vae` subfolders are stored in the `.safetensors` format. By default, 🤗 Diffusers automatically loads these `.safetensors` files from their subfolders if they're available in the model repository.

For more explicit control, you can optionally set `use_safetensors=True` (if safetensors is not installed, you'll get an error message asking you to install it):
```py
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True)
```
However, model weights are not necessarily stored in separate subfolders like in the example above. Sometimes, all the weights are stored in a single `.safetensors` file. In this case, if the weights are Stable Diffusion weights, you can load the file directly with the [`~diffusers.loaders.FromCkptMixin.from_ckpt`] method:
```py
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_ckpt(
"https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
)
```
## Convert to safetensors

Not all weights on the Hub are available in the `.safetensors` format, and you may encounter weights stored as `.bin`. In this case, use the [Convert Space](https://huggingface.co/spaces/diffusers/convert) to convert the weights to `.safetensors`. The Convert Space downloads the pickled weights, converts them, and opens a Pull Request to upload the newly converted `.safetensors` file to the Hub. This way, if the pickled file contains any malicious code, it's uploaded to the Hub (which has a [security scanner](https://huggingface.co/docs/hub/security-pickle#hubs-security-scanner) to detect unsafe files and suspicious pickle imports) rather than to your own computer.

You can use the model with the new `.safetensors` weights by specifying the reference to the Pull Request in the `revision` parameter (you can also test it in this [Check PR](https://huggingface.co/spaces/diffusers/check_pr) Space on the Hub), for example `refs/pr/22`:
```py
from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
```
## Why use safetensors?

There are several reasons for using safetensors:

- The biggest reason is safety. As open-source and model distribution grow, it's important to be able to trust that the model weights you download don't contain any malicious code. The current size of the header in safetensors prevents parsing extremely large JSON files.
- Loading speed when switching between models is another reason to use safetensors, which performs a zero-copy of the tensors. It is especially fast compared to pickle if you're loading the weights to CPU (the default), and just as fast, if not faster, when directly loading the weights to GPU. You'll only notice the performance difference if the model is already loaded, and not if you're downloading the weights or loading the model for the first time.
The time it takes to load the entire pipeline:
```py
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
"Loaded in safetensors 0:00:02.033658"
"Loaded in PyTorch 0:00:02.663379"
```
But the actual time it takes to load 500MB of model weights is only:
```bash
safetensors: 3.4873ms
PyTorch: 172.7537ms
```
Lazy loading is also supported in safetensors, which is useful in distributed settings where you may only want to load some of the tensors. This format allowed the [BLOOM](https://huggingface.co/bigscience/bloom) model to be loaded on 8 GPUs in 45 seconds, instead of 10 minutes with regular PyTorch weights.
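As a rough illustration of what lazy access looks like (a sketch; the file name here is hypothetical), `safetensors.safe_open` lets you read individual tensors without materializing the whole file:

```python
from safetensors import safe_open

# Open the file lazily; only the header is parsed up front,
# and each tensor is read on demand.
with safe_open("diffusion_pytorch_model.safetensors", framework="pt", device="cpu") as f:
    name = next(iter(f.keys()))
    tensor = f.get_tensor(name)  # only this tensor is loaded into memory
```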
# Load pipelines, models, and schedulers

A diffusion model fundamentally works through complex interactions between multiple components (models, tokenizers, schedulers). Diffusers is designed to make these diffusion models available through an easier, more convenient API. The [`DiffusionPipeline`] bundles the complexity of an entire diffusion model into a single pipeline API, while still letting you flexibly customize each of its components for your task.

Everything you need for training and inference with a diffusion model is accessible through the [`DiffusionPipeline.from_pretrained`] method. (We'll go over what that means in more detail in the following paragraphs.)

This guide covers how to:

* load a pipeline from the Hub or locally
* use different components in a pipeline
* load checkpoint variants rather than the original checkpoints (a variant is a checkpoint that uses a floating point type other than the default `fp32`, e.g. `fp16`, or that uses non-EMA weights)
* load models and schedulers
## Diffusion Pipeline
<Tip>
💡 Skip ahead to the [About the DiffusionPipeline](#about-the-diffusionpipeline) section if you're interested in a more detailed explanation of how the [`DiffusionPipeline`] class works.
</Tip>
The [`DiffusionPipeline`] class is the simplest and most generic way to load a diffusion model from the [Hub](https://huggingface.co/models?library=diffusers). The [`DiffusionPipeline.from_pretrained`] method automatically detects the correct pipeline class, downloads and caches the required configuration and weight files, and returns a pipeline instance.
```python
from diffusers import DiffusionPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
pipe = DiffusionPipeline.from_pretrained(repo_id)
```
Of course, you can also skip the [`DiffusionPipeline`] class and explicitly load the specific pipeline class instead. The code below returns the same instance as the example above:
```python
from diffusers import StableDiffusionPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(repo_id)
```
Checkpoints such as [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) can be used for more than one task (both of the above, for example, can be used for text-to-image and image-to-image). To use such a checkpoint for a task other than its default, you have to load it with the corresponding task-specific pipeline:
```python
from diffusers import StableDiffusionImg2ImgPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id)
```
### Local pipeline

To load a pipeline locally, first download the checkpoint to your local disk with `git-lfs`. Running the commands below creates a folder named `./stable-diffusion-v1-5` on your local disk:
```bash
git lfs install
git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
```
Then pass the local path to the [`~DiffusionPipeline.from_pretrained`] method:
```python
from diffusers import DiffusionPipeline
repo_id = "./stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
```
As in the example code above, when `repo_id` is a local path, the [`~DiffusionPipeline.from_pretrained`] method automatically detects it and doesn't download any files from the Hub. This also means that if the pipeline checkpoint stored on your local disk is outdated, the stored checkpoint is used as-is instead of downloading the latest version.
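If you want to be explicit that nothing should be fetched from the Hub, `from_pretrained` also accepts a `local_files_only` flag (a short sketch; with this flag set, loading raises an error instead of downloading when a file is missing from the local path or cache):

```python
from diffusers import DiffusionPipeline

# Resolve every file from the local path or cache; never contact the Hub.
pipe = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", local_files_only=True)
```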
### Swapping components in a pipeline

The components inside a pipeline can be swapped for other compatible components. Such swapping matters because:

- Which scheduler you use is an important factor in the trade-off between generation speed and generation quality.
- The components of a diffusion model are usually trained independently from each other, so a better-performing component can be swapped in to improve results.
- During fine-tuning, usually only some of the components, such as the UNet or the text encoder, are trained.

You can find out which schedulers are compatible through the `compatibles` property:
```python
from diffusers import DiffusionPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
stable_diffusion.scheduler.compatibles
```
This time, let's use the [`SchedulerMixin.from_pretrained`] method to replace the default [`PNDMScheduler`] with the better-performing [`EulerDiscreteScheduler`]. When loading a scheduler, the `subfolder` argument is required to point to the [scheduler subfolder](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler) of the pipeline repository.

Then pass the newly created [`EulerDiscreteScheduler`] instance to the `scheduler` argument of [`DiffusionPipeline`]:
๊ทธ ๋ค์ ์๋กญ๊ฒ ์์ฑํ [`EulerDiscreteScheduler`] ์ธ์คํด์ค๋ฅผ [`DiffusionPipeline`]์ `scheduler` ์ธ์์ ์ ๋ฌํฉ๋๋ค.
```python
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler
repo_id = "runwayml/stable-diffusion-v1-5"
scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler)
```
### Safety checker

Diffusion models like Stable Diffusion can generate harmful images. To prevent this, Diffusers includes a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) that screens generated images for harmful content. If you'd rather not use the safety checker, pass `None` to the `safety_checker` argument:
```python
from diffusers import DiffusionPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None)
```
### Reuse components

If multiple pipelines use the same model repeatedly, there's no need to load duplicate copies of the same weights into RAM. The [`~DiffusionPipeline.components`] property gives you access to the components inside a pipeline; in this section, we'll use it to avoid loading the same model weights into RAM twice.
```python
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
model_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)
components = stable_diffusion_txt2img.components
```
Then you can pass the `components` variable declared above to another pipeline, reusing the same components without loading the model weights into RAM again:
```python
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components)
```
You can also pass components to a pipeline individually. For example, you can reuse all the components of the `stable_diffusion_txt2img` pipeline in the `stable_diffusion_img2img` pipeline except for the safety checker (`safety_checker`) and feature extractor (`feature_extractor`):
```python
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
model_id = "runwayml/stable-diffusion-v1-5"
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(
vae=stable_diffusion_txt2img.vae,
text_encoder=stable_diffusion_txt2img.text_encoder,
tokenizer=stable_diffusion_txt2img.tokenizer,
unet=stable_diffusion_txt2img.unet,
scheduler=stable_diffusion_txt2img.scheduler,
safety_checker=None,
feature_extractor=None,
requires_safety_checker=False,
)
```
## Checkpoint variants
A variant usually refers to checkpoints that:

- use a lower-precision, lower-storage floating point type such as `torch.float16`. *(These variants can't be used for additional training or on a CPU.)*
- use non-EMA weights. *(Non-EMA weights are recommended for fine-tuning, but shouldn't be used for inference.)*
<Tip>
💡 When checkpoints have an identical model structure but were trained on different datasets or with a different training setup, they should be stored in separate repositories instead of as variants (for example, [`stable-diffusion-v1-4`] and [`stable-diffusion-v1-5`]).
</Tip>
| **checkpoint type** | **weight name** | **argument for loading weights** |
| ------------------- | ----------------------------------- | -------------------------------- |
| original | diffusion_pytorch_model.bin | |
| floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` |
| non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` |
There are two important arguments for loading variants:

* `torch_dtype` defines the floating point type of the checkpoint being loaded. For example, specifying `torch_dtype=torch.float16` converts the weights to `fp16`. (If not set, `fp32` weights are loaded by default.) You can also load the original checkpoint without specifying the `variant` argument and then convert it to `fp16` via `torch_dtype=torch.float16`. In that case, the default `fp32` weights are downloaded first, loaded, and then converted to `fp16`.
* `variant` defines which variant should be loaded from the repository. For example, to load the `non_ema` checkpoint from the [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) repository, pass `variant="non_ema"`.
```python
from diffusers import DiffusionPipeline
# load fp16 variant
stable_diffusion = DiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
)
# load non_ema variant
stable_diffusion = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
```
To save a checkpoint in a different floating point type or with non-EMA weights, use the [`DiffusionPipeline.save_pretrained`] method and specify the `variant` argument. You should save the variant to the same folder as the original checkpoint, so that both the original checkpoint and the variant can be loaded from the same folder:
```python
from diffusers import DiffusionPipeline
# save as fp16 variant
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16")
# save as non-ema variant
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
```
If you don't save the variant to an existing folder, you must specify the `variant` argument, otherwise an error is raised because the original checkpoint can't be found:
```python
# ๐ this won't work
stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16)
# ๐ this works
stable_diffusion = DiffusionPipeline.from_pretrained(
"./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
)
```
### Loading models

Models are loaded with the [`ModelMixin.from_pretrained`] method, which downloads and caches the latest version of the model weights and configuration files. If those files are already stored in your local cache in their latest version, [`ModelMixin.from_pretrained`] reuses the cached files instead of re-downloading them.
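If you need the cache somewhere other than the default `~/.cache/huggingface` location, you can pass `cache_dir` (a sketch; the directory name here is arbitrary):

```python
from diffusers import UNet2DConditionModel

# Download (or reuse) the weights under a project-local cache directory.
model = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", cache_dir="./model_cache"
)
```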
Models are loaded from the subfolder specified with the `subfolder` argument. For example, the model weights of the UNet for `runwayml/stable-diffusion-v1-5` are stored in the [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) folder:
```python
from diffusers import UNet2DConditionModel
repo_id = "runwayml/stable-diffusion-v1-5"
model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet")
```
Or you can load it directly from the [model's repository](https://huggingface.co/google/ddpm-cifar10-32/tree/main):
```python
from diffusers import UNet2DModel
repo_id = "google/ddpm-cifar10-32"
model = UNet2DModel.from_pretrained(repo_id)
```
You can also load and save model variants, such as non-EMA or `fp16` weights, by specifying the `variant` argument discussed above:
```python
from diffusers import UNet2DConditionModel
model = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non-ema")
model.save_pretrained("./local-unet", variant="non-ema")
```
### Schedulers

Schedulers are loaded with the [`SchedulerMixin.from_pretrained`] method. Unlike models, schedulers have no weights of their own and therefore, naturally, don't require any training. They are defined by a configuration file (in the scheduler subfolder).

Loading several schedulers doesn't consume much memory, and the same scheduler configuration can be applied to a variety of schedulers. All of the schedulers loaded in the example code below are compatible with [`StableDiffusionPipeline`], which means the same scheduler configuration file can be applied to each of them:
```python
from diffusers import StableDiffusionPipeline
from diffusers import (
DDPMScheduler,
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
)
repo_id = "runwayml/stable-diffusion-v1-5"
ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler")
ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler")
pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler")
lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler")
# replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler`
pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm)
```
### About the DiffusionPipeline

As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things:

- First, `from_pretrained` downloads the latest version of the pipeline and caches it. If the latest version is already in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cached files instead of re-downloading them.
- Second, it loads the checkpoint with the appropriate pipeline class, determined from the `model_index.json` file.
A pipeline's folder structure corresponds directly to the structure of its pipeline class. For example, the [`StableDiffusionPipeline`] class corresponds to the folder structure of the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) repository:
```python
from diffusers import DiffusionPipeline
repo_id = "runwayml/stable-diffusion-v1-5"
pipeline = DiffusionPipeline.from_pretrained(repo_id)
print(pipeline)
```
Looking at the output of the code above, you can see that `pipeline` is an instance of [`StableDiffusionPipeline`], consisting of the following seven components:

- `"feature_extractor"`: an instance of [`~transformers.CLIPFeatureExtractor`]
- `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening harmful content
- `"scheduler"`: an instance of [`PNDMScheduler`]
- `"text_encoder"`: an instance of [`~transformers.CLIPTextModel`]
- `"tokenizer"`: an instance of [`~transformers.CLIPTokenizer`]
- `"unet"`: an instance of [`UNet2DConditionModel`]
- `"vae"`: an instance of [`AutoencoderKL`]
```json
StableDiffusionPipeline {
"feature_extractor": [
"transformers",
"CLIPImageProcessor"
],
"safety_checker": [
"stable_diffusion",
"StableDiffusionSafetyChecker"
],
"scheduler": [
"diffusers",
"PNDMScheduler"
],
"text_encoder": [
"transformers",
"CLIPTextModel"
],
"tokenizer": [
"transformers",
"CLIPTokenizer"
],
"unet": [
"diffusers",
"UNet2DConditionModel"
],
"vae": [
"diffusers",
"AutoencoderKL"
]
}
```
If you compare the components of the pipeline instance to the folder structure of [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5), you'll see each component has its own folder in the repository:
```
.
โโโ feature_extractor
โ โโโ preprocessor_config.json
โโโ model_index.json
โโโ safety_checker
โ โโโ config.json
โ โโโ pytorch_model.bin
โโโ scheduler
โ โโโ scheduler_config.json
โโโ text_encoder
โ โโโ config.json
โ โโโ pytorch_model.bin
โโโ tokenizer
โ โโโ merges.txt
โ โโโ special_tokens_map.json
โ โโโ tokenizer_config.json
โ โโโ vocab.json
โโโ unet
โ โโโ config.json
โ โโโ diffusion_pytorch_model.bin
โโโ vae
โโโ config.json
โโโ diffusion_pytorch_model.bin
```
You can also reference each of the components as an attribute of the pipeline instance:
```py
pipeline.tokenizer
```
```python
CLIPTokenizer(
name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer",
vocab_size=49408,
model_max_length=77,
is_fast=False,
padding_side="right",
truncation_side="right",
special_tokens={
"bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
"eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
"unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
"pad_token": "<|endoftext|>",
},
)
```
Every pipeline uses a `model_index.json` file to pass the following information to [`DiffusionPipeline`]:

- `_class_name` tells which pipeline class should be used.
- `_diffusers_version` tells which version of Diffusers was used to create the models in the pipeline.
- Then, for each component, it records which library and which class the component was created from. (In the example below, `"feature_extractor": ["transformers", "CLIPImageProcessor"]` means the `feature_extractor` component was created from the `CLIPImageProcessor` class of the `transformers` library.)
```json
{
"_class_name": "StableDiffusionPipeline",
"_diffusers_version": "0.6.0",
"feature_extractor": [
"transformers",
"CLIPImageProcessor"
],
"safety_checker": [
"stable_diffusion",
"StableDiffusionSafetyChecker"
],
"scheduler": [
"diffusers",
"PNDMScheduler"
],
"text_encoder": [
"transformers",
"CLIPTextModel"
],
"tokenizer": [
"transformers",
"CLIPTokenizer"
],
"unet": [
"diffusers",
"UNet2DConditionModel"
],
"vae": [
"diffusers",
"AutoencoderKL"
]
}
```
# Overview
A pipeline is an end-to-end class that provides a quick and easy way to use a diffusion system for inference by bundling independently trained models and schedulers together. Certain combinations of models and schedulers define specific pipeline types, like [`StableDiffusionPipeline`] or [`StableDiffusionControlNetPipeline`], with specialized capabilities. All pipeline types inherit from the base [`DiffusionPipeline`] class; pass it any checkpoint, and it'll automatically detect the pipeline type and load the necessary components.

This section introduces the tasks supported by pipelines, such as unconditional image generation and the various techniques and variations of text-to-image generation. You'll also learn how to take more control over the generation process by setting a seed for reproducibility and by weighting prompts to adjust how strongly specific words in the prompt influence the output. Finally, you'll see how to create a community pipeline for a custom task like generating images from speech.
# Improve image quality with deterministic generation

A common way to improve the quality of generated images is with *deterministic batch generation*: generate a batch of images and select one image to improve with a more detailed prompt in a second round of inference. The key is to pass a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator)s to the pipeline for batched image generation, and to tie each `Generator` to a seed so it can be reused for a specific image.

For example, let's generate several versions of the following prompt with [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5):
```py
prompt = "Labrador in the style of Vermeer"
```
Instantiate the pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available):
```python
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
```
Now define four different `Generator`s and assign each a seed (`0` to `3`), so that a `Generator` can be reused later for a specific image:
```python
>>> import torch
>>> generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)]
```
Generate the images and have a look:
```python
>>> images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
>>> images
```

In this example, you'll improve upon the first image - but in reality, you can use any image you want (even the image with double sets of eyes!). The first image used the `Generator` with seed `0`, so you'll reuse that `Generator` for the second round of inference. To improve the quality of the image, add some additional text to the prompt:
```python
prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]]
generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)]
```
Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round!
```python
>>> images = pipe(prompt, generator=generator).images
>>> images
```

<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Adapting a model to a new task
Many diffusion systems share the same components, which lets you adapt a pretrained model for one task to an entirely different task.
This guide shows how to adapt a pretrained text-to-image model for inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`].
## Configuring UNet2DConditionModel parameters
A [`UNet2DConditionModel`] by default accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels). For example, load a pretrained text-to-image model like [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and check the number of `in_channels`:
```py
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.unet.config["in_channels"]
4
```
Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model like [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting):
```py
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
pipeline.unet.config["in_channels"]
9
```
To adapt the text-to-image model for inpainting, you'll need to change the number of `in_channels` from 4 to 9.
Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image model weights, and change `in_channels` to 9. Changing the number of `in_channels` changes the tensor shapes, so you need to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size mismatch error.
```py
from diffusers import UNet2DConditionModel
model_id = "runwayml/stable-diffusion-v1-5"
unet = UNet2DConditionModel.from_pretrained(
    model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True
)
```
The pretrained weights of the other components of the text-to-image model are initialized from their checkpoints, but the input channel weights (`conv_in.weight`) of the `unet` are randomly initialized. It is important to finetune the model for inpainting, because otherwise the model returns noise.
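As a quick sanity check, you can inspect the shape of the reinitialized input convolution; the 320 output channels below assume the Stable Diffusion v1.5 UNet layout, so treat the printed value as illustrative:
```py
# The input convolution now maps 9 channels (4 latent + 4 masked-image latent
# + 1 mask channel in the inpainting formulation) into the first UNet block.
print(unet.conv_in.weight.shape)  # torch.Size([320, 9, 3, 3])
```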
# Create a dataset for training
There are many datasets on the [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) to train a model on, but if you can't find one you're interested in or want to use, you can create a dataset with the 🤗 [Datasets](hf.co/docs/datasets) library.
The dataset structure depends on the task you want to train your model on.
The most basic dataset structure is a directory of images for tasks like unconditional image generation.
Another dataset structure may be a directory of images and a text file containing their corresponding text captions for tasks like text-to-image generation.
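For the captioned case, 🤗 Datasets' `ImageFolder` builder can pick captions up from a `metadata.jsonl` file placed alongside the images, where each line pairs a `file_name` with extra columns such as `text`. A minimal sketch of producing such a file (the file names and captions here are placeholders):
```python
import json

# One JSON object per line; "file_name" is the key ImageFolder expects,
# and any other key (here "text") becomes a dataset column.
captions = {"0001.png": "a red circle", "0002.png": "a blue square"}
with open("data_dir/metadata.jsonl", "w") as f:
    for file_name, text in captions.items():
        f.write(json.dumps({"file_name": file_name, "text": text}) + "\n")
```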
This guide will show you two ways to create a dataset to finetune on:
- provide a folder of images to the `--train_data_dir` argument
- upload a dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument
<Tip>
💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide.
</Tip>
## Provide a dataset as a folder
For unconditional generation, you can provide your own dataset as a folder of images.
The training script uses the [ImageFolder](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) builder from 🤗 Datasets to automatically build a dataset from the folder. Your directory structure should look like:
```bash
data_dir/xxx.png
data_dir/xxy.png
data_dir/[...]/xxz.png
```
Pass the path to the dataset directory to the `--train_data_dir` argument, and then you can start training:
```bash
accelerate launch train_unconditional.py \
  --train_data_dir <path-to-train-directory> \
  <other-arguments>
```
## Upload your data to the Hub
<Tip>
💡 For more details about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post.
</Tip>
Start by creating a dataset with the [ImageFolder](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images.
You can use the `data_dir` or `data_files` parameters to specify the location of the dataset.
The `data_files` parameter supports mapping specific files to dataset splits like `train` or `test`:
```python
from datasets import load_dataset

# example 1: local folder
dataset = load_dataset("imagefolder", data_dir="path_to_your_folder")

# example 2: local files (supported formats: tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset("imagefolder", data_files="path_to_zip_file")

# example 3: remote files (supported formats: tar, gzip, zip, xz, rar, zstd)
dataset = load_dataset(
    "imagefolder",
    data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip",
)

# example 4: multiple splits
dataset = load_dataset(
    "imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}
)
```
Then use the [`push_to_hub`](https://huggingface.co/docs/datasets/v2.13.1/en/package_reference/main_classes#datasets.Dataset.push_to_hub) method to upload the dataset to the Hub:
```python
# assuming you have run the huggingface-cli login command in a terminal
dataset.push_to_hub("name_of_your_dataset")

# if you want to push to a private repo, simply pass private=True:
dataset.push_to_hub("name_of_your_dataset", private=True)
```
Now the dataset is ready for training by passing the dataset name to the `--dataset_name` argument:
```bash
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
--pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
--dataset_name="name_of_your_dataset" \
<other-arguments>
```
## Next steps
Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) argument of a training script.
As a next step, try using your dataset to train a model for [unconditional generation](https://huggingface.co/docs/diffusers/v0.18.2/en/training/unconditional_training) or [text-to-image generation](https://huggingface.co/docs/diffusers/training/text2image)!
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# ControlNet
[Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) (ControlNet) was written by Lvmin Zhang and Maneesh Agrawala.
This example is based on [training with the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). ControlNet is trained on a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k) to learn to fill circles.
## Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies.
<Tip warning={true}>
To successfully run the latest versions of the example scripts, we highly recommend installing from source and keeping the installation up to date. We update the example scripts frequently and install example-specific requirements.
</Tip>
To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then navigate to the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/controlnet):
```bash
cd examples/controlnet
```
Now run:
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
Or, if you don't know what your environment is, you can initialize with a default 🤗Accelerate configuration:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell like a notebook, you can initialize it with the code below:
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
## Circle-filling dataset
The original dataset is hosted in the ControlNet [repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip), but we re-uploaded it [here](https://huggingface.co/datasets/fusing/fill50k) to be compatible with 🤗 Datasets, so that the data loading can be handled within the training script.
Our training examples use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) because that is what the original set of ControlNet models was trained on. However, ControlNet can be trained to augment any compatible Stable Diffusion model, such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`stabilityai/stable-diffusion-2-1`](https://huggingface.co/stabilityai/stable-diffusion-2-1).
To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
## Training
Download the following images, which will be used during training:
```sh
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to the directory containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
The training script creates and saves a `diffusion_pytorch_model.bin` file in your repository.
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=4 \
--push_to_hub
```
This default configuration requires ~38GB VRAM.
By default, the training script logs its outputs to TensorBoard. Pass `--report_to wandb` to use Weights & Biases instead.
Gradient accumulation with a smaller batch size can be used to reduce the training requirements to ~20 GB VRAM.
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--push_to_hub
```
## Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
for running distributed training with `accelerate`. Here is an example command:
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=4 \
--mixed_precision="fp16" \
--tracker_project_name="controlnet-demo" \
--report_to=wandb \
--push_to_hub
```
## Example results
#### After 300 steps with batch size 8
| | |
|-------------------|:-------------------------:|
| | red circle with blue background |
|  |  |
| | cyan circle with brown floral background |
|  |  |
#### After 6000 steps with batch size 8
| | |
|-------------------|:-------------------------:|
| | red circle with blue background |
|  |  |
| | cyan circle with brown floral background |
|  |  |
## Training on a 16 GB GPU
Enable the following optimizations to train on a 16 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (take a look at the linked instructions if you don't have it installed)
Now you can launch the training script:
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--use_8bit_adam \
--push_to_hub
```
## Training on a 12 GB GPU
Enable the following optimizations to train on a 12 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (take a look at the linked instructions if you don't have it installed)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (take a look at the linked instructions if you don't have it installed)
- Set gradients to `None`
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--learning_rate=1e-5 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--use_8bit_adam \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
--push_to_hub
```
Make sure `xformers` is installed with `pip install xformers`, and use `--enable_xformers_memory_efficient_attention`.
## Training on an 8 GB GPU
We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does save memory, we have not confirmed that it trains successfully. You will very likely have to change the configuration to get a successful training run.
Enable the following optimizations to train on an 8 GB GPU:
- Gradient checkpointing
- bitsandbytes' [8-bit optimizer](https://github.com/TimDettmers/bitsandbytes#requirements--installation) (take a look at the linked instructions if you don't have it installed)
- [xFormers](https://huggingface.co/docs/diffusers/training/optimization/xformers) (take a look at the linked instructions if you don't have it installed)
- Set gradients to `None`
- DeepSpeed stage 2 with parameter and optimizer offloading
- fp16 mixed precision
[DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either CPU or NVME. This requires significantly more RAM (about 25 GB).
You'll have to configure your environment with `accelerate config` to enable DeepSpeed stage 2.
The configuration file should look like this:
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 4
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: false
  zero_stage: 2
distributed_type: DEEPSPEED
```
<Tip>
See the [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
</Tip>
Changing the default Adam optimizer to DeepSpeed's `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup, but
it requires a CUDA toolchain with the same version as PyTorch. 8-bit optimizers do not seem to be compatible with
DeepSpeed at the moment.
```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="path to save model"
accelerate launch train_controlnet.py \
--pretrained_model_name_or_path=$MODEL_DIR \
--output_dir=$OUTPUT_DIR \
--dataset_name=fusing/fill50k \
--resolution=512 \
--validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
--validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
--mixed_precision fp16 \
--push_to_hub
```
## Inference
The trained model can be run with [`StableDiffusionControlNetPipeline`].
Set `base_model_path` and `controlnet_path` to the values `--pretrained_model_name_or_path` and
`--output_dir` were respectively set to in the training script.
```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch
base_model_path = "path to model"
controlnet_path = "path to controlnet"
controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)
# speed up the diffusion process with a faster scheduler and memory optimizations
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove the following line if xformers is not installed
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"
# generate the image
generator = torch.manual_seed(0)
image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
image.save("./output.png")
```
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Textual-Inversion
[[open-in-colab]]
[Textual Inversion](https://arxiv.org/abs/2208.01618) is a technique for capturing novel concepts from a small number of example images. The technique was originally demonstrated with [Latent Diffusion](https://github.com/CompVis/latent-diffusion), but it has since been applied to other similar models such as [Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/conceptual/stable_diffusion). The learned concepts can be used to better control the images generated by a text-to-image pipeline. The model learns new "words" in the text encoder's embedding space, which are used within text prompts for personalized image generation.

<small>By using just 3-5 images you can teach new concepts to a model such as Stable Diffusion for personalized image generation <a href="https://github.com/rinongal/textual_inversion">(image source)</a>.</small>
This guide explains how to train a [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model with Textual Inversion. All the Textual Inversion training scripts used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) if you're interested in taking a closer look at how things work under the hood.
<Tip>
There is a community-created collection of trained Textual Inversion models in the [Stable Diffusion Textual Inversion Concepts Library](https://huggingface.co/sd-concepts-library). Over time, this will hopefully grow into a useful resource as more concepts are added!
</Tip>
Before you begin, make sure you install the training dependencies:
```bash
pip install diffusers accelerate transformers
```
After all the dependencies have been set up, initialize a [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
To set up a default 🤗Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Finally, install [xFormers](https://huggingface.co/docs/diffusers/main/en/training/optimization/xformers) to reduce memory usage with memory-efficient attention. After xFormers is installed, add the `--enable_xformers_memory_efficient_attention` argument to the training script. xFormers is not supported for Flax.
## Upload model to Hub
If you want to store your model on the Hub, add the following argument to the training script:
```bash
--push_to_hub
```
## Save and load checkpoints
It is often a good idea to regularly save checkpoints of your model during training. This way, you can resume training from a saved checkpoint if it is interrupted for any reason. Pass the following argument to the training script to save the full training state in a subfolder of `output_dir` as a checkpoint every 500 steps:
```bash
--checkpointing_steps=500
```
To resume training from a saved checkpoint, pass the following argument and the specific checkpoint to resume from to the training script:
```bash
--resume_from_checkpoint="checkpoint-1500"
```
## Finetuning
For your training dataset, download this [cat toy dataset](https://huggingface.co/datasets/diffusers/cat_toy_example) and store it in a directory. To use your own dataset, take a look at the [Create a dataset for training](https://huggingface.co/docs/diffusers/training/create_dataset) guide.
```py
from huggingface_hub import snapshot_download
local_dir = "./cat"
snapshot_download(
    "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes"
)
```
Assign the model's repository id (or the path to the directory containing the model weights) to the `MODEL_NAME` environment variable and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument, and assign the path to the directory containing the images to the `DATA_DIR` environment variable.
Now you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py). The script creates the following files and saves them to your repository:
- `learned_embeds.bin`
- `token_identifier.txt`
- `type_of_concept.txt`
<Tip>
💡 A full training run takes up to ~1 hour on one V100 GPU. While you're waiting for the training to complete, feel free to check out [how Textual Inversion works](https://huggingface.co/docs/diffusers/training/text_inversion#how-it-works) in the section below if you're curious!
</Tip>
<frameworkcontent>
<pt>
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATA_DIR="./cat"
accelerate launch textual_inversion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" --initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 --scale_lr \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--output_dir="textual_inversion_cat" \
--push_to_hub
```
<Tip>
💡 To increase the trainable capacity, you can also associate the placeholder token (`<cat-toy>`) with multiple embedding vectors instead of a single one. This trick can help the model better capture the style of more complex images (i.e. concepts). To enable training multiple embedding vectors, pass the following option:
```bash
--num_vectors=5
```
</Tip>
</pt>
<jax>
If you have access to TPUs, try out the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py) to train even faster (this also works on GPUs). With the same configuration settings, the Flax training script should be at least ~70% faster than the PyTorch training script! ⚡️
Before you begin, make sure you install the Flax-specific dependencies:
```bash
pip install -U -r requirements_flax.txt
```
Assign the model's repository id (or the path to the directory containing the model weights) to the `MODEL_NAME` environment variable and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument.
Then you can launch the [training script](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion_flax.py):
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="./cat"
python textual_inversion_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" --initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 --scale_lr \
--output_dir="textual_inversion_cat" \
--push_to_hub
```
</jax>
</frameworkcontent>
### Intermediate logging
If you're interested in following along with your model's training progress, you can save generated images from the training process. Add the following arguments to the training script to enable intermediate logging:
- `validation_prompt`: the prompt used to generate samples (set to `None` by default, in which case intermediate logging is disabled)
- `num_validation_images`: the number of sample images to generate
- `validation_steps`: the number of steps between generating sample images from `validation_prompt`
```bash
--validation_prompt="A <cat-toy> backpack"
--num_validation_images=4
--validation_steps=100
```
## Inference
Once you have trained a model, you can use it for inference with [`StableDiffusionPipeline`].
By default, the textual inversion script only saves the embedding vectors obtained through Textual Inversion; those embedding vectors are added to the embedding matrix of the text encoder.
<frameworkcontent>
<pt>
<Tip>
💡 The community has created a large library of Textual Inversion embedding vectors called [sd-concepts-library](https://huggingface.co/sd-concepts-library). Instead of training Textual Inversion embeddings from scratch, it's worth checking whether the embedding you're looking for has already been added to that library.
</Tip>
To load Textual Inversion embedding vectors, you first need to load the base model that was used when training them. Here we assume the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model was used:
```python
from diffusers import StableDiffusionPipeline
import torch
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
```
Next, load the Textual Inversion embedding vector with the [`~TextualInversionLoaderMixin.load_textual_inversion`] function. Here we'll load the embedding of the `<cat-toy>` example from before:
```python
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
```
Now you can run the pipeline to make sure the placeholder token (`<cat-toy>`) works as expected:
```python
prompt = "A <cat-toy> backpack"
image = pipe(prompt, num_inference_steps=50).images[0]
image.save("cat-backpack.png")
```
[`~TextualInversionLoaderMixin.load_textual_inversion`] can load not only textual embedding vectors saved in the Diffusers format, but also embedding vectors saved in the [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) format. To do this, first download an embedding vector from [civitAI](https://civitai.com/models/3036?modelVersionId=8387) and then load it locally:
```python
pipe.load_textual_inversion("./charturnerv2.pt")
```
</pt>
<jax>
Currently there is no `load_textual_inversion` function for Flax, so you have to make sure the Textual Inversion embedding vector is saved as part of the model after training. The model can then be run just like any other Flax model:
```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path-to-your-trained-model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "A <cat-toy> backpack"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("cat-backpack.png")
```
</jax>
</frameworkcontent>
## How it works

<small>Architecture overview from the Textual Inversion <a href="https://textual-inversion.github.io/">blog post.</a></small>
Usually, text prompts are tokenized into an embedding before being passed to a model, which is often a transformer. Textual Inversion does something similar, but it learns a new token embedding, `v*`, from the special token `S*` in the diagram above. The model output is used to condition the diffusion model, which helps the diffusion model understand the prompt and new concepts from just a few example images.
To do this, Textual Inversion uses a generator model and noised versions of the training images. The generator tries to predict less noisy versions of the images, and the token embedding `v*` is optimized based on how well the generator does. If the token embedding successfully captures the new concept, it gives more useful information to the diffusion model and helps create clearer images with less noise. This optimization process typically occurs after several thousand exposures to a variety of prompt and image variants.
# Distributed inference with multiple GPUs
On distributed setups, you can run inference across multiple GPUs with 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) or [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html), which is useful for generating multiple prompts in parallel.
This guide will show you how to use 🤗 Accelerate and PyTorch Distributed for distributed inference.
## 🤗 Accelerate
🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) is a library designed to make it easy to train or run inference across distributed setups. It simplifies the process of setting up the distributed environment, allowing you to focus on your PyTorch code.
To begin, create a Python file and initialize an [`accelerate.PartialState`] to create a distributed environment; your setup is automatically detected, so you don't need to explicitly define the `rank` or `world_size`. Move the [`DiffusionPipeline`] to `distributed_state.device` to assign a GPU to each process.
Now use the [`~accelerate.PartialState.split_between_processes`] utility as a context manager to automatically distribute the prompts between the number of processes.
```py
import torch
from accelerate import PartialState
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipeline.to(distributed_state.device)

with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
    result = pipeline(prompt).images[0]
    result.save(f"result_{distributed_state.process_index}.png")
```
Use the `--num_processes` argument to specify the number of GPUs to use, and call `accelerate launch` to run the script:
```bash
accelerate launch run_distributed.py --num_processes=2
```
<Tip>
To learn more, take a look at the [Distributed Inference with 🤗 Accelerate](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) guide.
</Tip>
## PyTorch Distributed
PyTorch supports [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html), which enables data parallelism.
To start, create a Python file and import `torch.distributed` and `torch.multiprocessing` to set up the distributed process group and to spawn the processes for inference on each GPU. You should also initialize a [`DiffusionPipeline`]:
```py
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from diffusers import DiffusionPipeline
sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```
Create a function to run inference; [`init_process_group`] handles creating a distributed environment with the type of backend to use, the `rank` of the current process, and the `world_size`, or the number of participating processes. Move the pipeline to `rank` and use `get_rank` to assign a GPU to each process, where each process handles a different prompt.
If you're running inference in parallel over 2 GPUs, then the `world_size` is 2.
```py
def run_inference(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    sd.to(rank)

    if torch.distributed.get_rank() == 0:
        prompt = "a dog"
    elif torch.distributed.get_rank() == 1:
        prompt = "a cat"

    image = sd(prompt).images[0]
    image.save(f"./{'_'.join(prompt)}.png")
```
To run the distributed inference, call [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) to run the `run_inference` function on the number of GPUs defined in `world_size`:
```py
def main():
    world_size = 2
    mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)


if __name__ == "__main__":
    main()
```
Once you have completed the inference script, use the `--nproc_per_node` argument to specify the number of GPUs to use and call `torchrun` to run the script:
```bash
torchrun run_distributed.py --nproc_per_node=2
```
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Text-to-image
<Tip warning={true}>
The text-to-image fine-tuning script is experimental. It's easy to overfit and run into issues like catastrophic forgetting. We recommend exploring different hyperparameters to get the best results on your dataset.
</Tip>
Text-to-image models like Stable Diffusion generate an image from a text prompt. This guide shows how to fine-tune the [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) model on your own dataset with PyTorch and Flax. All the training scripts for text-to-image fine-tuning used in this guide can be found in this [repository](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) if you're interested in taking a closer look.
Before running the scripts, make sure to install the library's training dependencies:
```bash
pip install git+https://github.com/huggingface/diffusers.git
pip install -U -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
If you have already cloned the repository, you don't need to perform these steps. Instead, you can pass the path of your local checkout to the training script, and it will be loaded from there.
### Hardware requirements
Using `gradient_checkpointing` and `mixed_precision`, it should be possible to fine-tune the model on a single 24GB GPU. For higher `batch_size` and faster training, it's better to use GPUs with more than 30GB of GPU memory. You can also use JAX/Flax for fine-tuning on TPUs or GPUs; see [below](#flax-jax-finetuning) for details.
You can reduce memory usage even further by enabling memory-efficient attention with xFormers. Make sure [xFormers is installed](./optimization/xformers) and pass the `--enable_xformers_memory_efficient_attention` flag to the training script.
xFormers is not available for Flax.
## Upload model to Hub
Store your model on the Hub by adding the following argument to the training script:
```bash
--push_to_hub
```
## Save and load checkpoints
It's a good idea to regularly save checkpoints in case anything happens during training. To save a checkpoint, pass the following argument to the training script:
```bash
--checkpointing_steps=500
```
Every 500 steps, the full training state is saved in a subfolder of `output_dir`. Checkpoints are named `checkpoint-` followed by the number of steps trained so far. For example, `checkpoint-1500` is a checkpoint saved after 1500 training steps.
To load a checkpoint and resume training, pass the `--resume_from_checkpoint` argument to the training script and specify the checkpoint to resume from. For example, the following argument resumes training from the checkpoint saved after 1500 training steps:
```bash
--resume_from_checkpoint="checkpoint-1500"
```
## Fine-tuning
<frameworkcontent>
<pt>
Launch the [PyTorch training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) for a fine-tuning run on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset like this:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export dataset_name="lambdalabs/pokemon-blip-captions"
accelerate launch train_text_to_image.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$dataset_name \
--use_ema \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--output_dir="sd-pokemon-model"
```
To fine-tune on your own dataset, prepare it according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
Modify the script if you want to use custom loading logic; we left pointers at the appropriate places in the code to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR` and where to save the model with `OUTPUT_DIR`:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export TRAIN_DIR="path_to_your_dataset"
export OUTPUT_DIR="path_to_save_model"
accelerate launch train_text_to_image.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$TRAIN_DIR \
--use_ema \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--output_dir=${OUTPUT_DIR}
```
</pt>
<jax>
With Flax, it's possible to train Stable Diffusion models faster on TPUs and GPUs thanks to [@duongna21](https://github.com/duongna21). It is very efficient on TPU hardware but works great on GPUs too. The Flax training script doesn't yet support features like gradient checkpointing or gradient accumulation, so you'll need a GPU with at least 30GB of memory or a TPU v3.
Before running the script, make sure you have the requirements installed:
```bash
pip install -U -r requirements_flax.txt
```
Then you can launch the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_flax.py) like this:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export dataset_name="lambdalabs/pokemon-blip-captions"
python train_text_to_image_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$dataset_name \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--output_dir="sd-pokemon-model"
```
To fine-tune on your own dataset, prepare it according to the format required by 🤗 [Datasets](https://huggingface.co/docs/datasets/index). You can [upload your dataset to the Hub](https://huggingface.co/docs/datasets/image_dataset#upload-dataset-to-the-hub), or you can [prepare a local folder with your files](https://huggingface.co/docs/datasets/image_dataset#imagefolder).
Modify the script if you want to use custom loading logic; we left pointers at the appropriate places in the code to help you. 🤗 The example script below shows how to fine-tune on a local dataset in `TRAIN_DIR`:
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export TRAIN_DIR="path_to_your_dataset"
python train_text_to_image_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$TRAIN_DIR \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--mixed_precision="fp16" \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
--output_dir="sd-pokemon-model"
```
</jax>
</frameworkcontent>
## LoRA
You can also use Low-Rank Adaptation of Large Language Models (LoRA), a fine-tuning technique for accelerating the training of large models, to fine-tune text-to-image models. For more details, take a look at the [LoRA training](lora#text-to-image) guide.
## Inference
Now you can load the fine-tuned model for inference by passing the model path or model name on the Hub to [`StableDiffusionPipeline`]:
<frameworkcontent>
<pt>
```python
import torch
from diffusers import StableDiffusionPipeline

model_path = "path_to_saved_model"
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-pokemon.png")
```
</pt>
<jax>
```python
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

model_path = "path_to_saved_model"
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)

prompt = "yoda pokemon"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("yoda-pokemon.png")
```
</jax>
</frameworkcontent>
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# InstructPix2Pix
[InstructPix2Pix](https://arxiv.org/abs/2211.09800) is a method to fine-tune text-conditioned diffusion models so they can follow an edit instruction for an image. Models fine-tuned with this method take the following as inputs:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/>
</p>
The output is an "edited" image that reflects the edit instruction applied to the input image:
<p align="center">
<img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/>
</p>
The `train_instruct_pix2pix.py` script (you can find it [here](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)) shows how to implement the training procedure and adapt it for Stable Diffusion.
*** `train_instruct_pix2pix.py` implements the InstructPix2Pix training procedure while staying faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), but it has only been tested on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a bigger dataset. A big dataset for InstructPix2Pix training can be found [here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered).
***
## Running locally with PyTorch
### Installing the dependencies
Before running the script, make sure to install the library's training dependencies:
**Important**
To successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```
Then `cd` into the example folder:
```bash
cd examples/instruct_pix2pix
```
Now run:
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
Or run the following for a default accelerate configuration without answering questions about your environment:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell like a notebook, follow these steps:
```python
from accelerate.utils import write_basic_config
write_basic_config()
```
### Example
As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. It is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to a folder containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to specify the dataset name in `DATASET_ID`:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```
Now you can launch training. The script saves all the components (`feature_extractor`, `scheduler`, `text_encoder`, `unet`, etc.) to a subfolder in your repository.
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
Additionally, we support performing validation inference to monitor training progress with Weights & Biases. You can enable this feature with `report_to="wandb"`:
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
--validation_prompt="make the mountains snowy" \
--seed=42 \
--report_to=wandb \
--push_to_hub
```
We recommend this type of evaluation, as it can be useful for model debugging. Note that you need `wandb` installed to use it; you can install `wandb` by running `pip install wandb`.
[Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq) you can find an example that includes some evaluation samples and the training hyperparameters.
***Note: In the original paper, the authors observed that a model trained at a 256x256 image resolution generalizes surprisingly well to larger resolutions such as 512x512. This is likely because of the larger dataset used during training.***
## Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:
```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \
--pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
--dataset_name=sayakpaul/instructpix2pix-1000-samples \
--use_ema \
--enable_xformers_memory_efficient_attention \
--resolution=512 --random_flip \
--train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 --checkpoints_total_limit=1 \
--learning_rate=5e-05 --lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
## Inference
Once training is complete, you can run inference:
```python
import PIL.Image
import PIL.ImageOps
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

model_id = "your_model_id"  # <- replace this
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)

url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"


def download_image(url):
    image = PIL.Image.open(requests.get(url, stream=True).raw)
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


image = download_image(url)
prompt = "wipe out the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10

edited_image = pipe(
    prompt,
    image=image,
    num_inference_steps=num_inference_steps,
    image_guidance_scale=image_guidance_scale,
    guidance_scale=guidance_scale,
    generator=generator,
).images[0]
edited_image.save("edited_image.png")
```
An example model repository obtained with the training script can be found at [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix).
We recommend experimenting with three parameters to control speed and quality:
* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`
In particular, `image_guidance_scale` and `guidance_scale` can have a big impact on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).
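A simple way to explore that trade-off is to sweep both scales on the same seed and compare the outputs side by side. A minimal sketch, reusing the `pipe`, `image`, and `prompt` from the inference example above (the value grids are arbitrary):
```python
import torch

# Re-seed per run so only the guidance scales differ between images.
for gs in [5, 7.5, 10]:
    for igs in [1.0, 1.5, 2.0]:
        generator = torch.Generator("cuda").manual_seed(0)
        edited = pipe(
            prompt,
            image=image,
            num_inference_steps=20,
            image_guidance_scale=igs,
            guidance_scale=gs,
            generator=generator,
        ).images[0]
        edited.save(f"edited_gs{gs}_igs{igs}.png")
```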
If you're looking for some interesting ways to use the InstructPix2Pix training methodology, take a look at the blog post [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# 🧨 Diffusers training examples
In this chapter, we'll look at how to use the `diffusers` library effectively through example code for a variety of use cases.
**Note**: If you're looking for official example code, take a look [here](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)!
The examples covered here aim to be:
- **Self-contained**: All the dependencies used by an example can be installed with a `pip install` command, and each example kindly lists them in a `requirements.txt` file, so you can conveniently install them with `pip install -r requirements.txt`. Example: [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt)
- **Easy-to-tweak**: We try to provide as many use cases as possible, but please remember that examples are just examples. Simply copy-pasting them will usually not solve your specific problem, which means you will sooner or later have to adjust parts of the code to your situation and needs. Most training examples therefore ship the data preprocessing and the training loop together, so you can easily tweak them as needed.
- **Beginner-friendly**: This chapter is written to build a general understanding of diffusion models and the `diffusers` library. We therefore deliberately leave out the latest state-of-the-art (SOTA) methods for diffusion models when we judge them too difficult for beginners.
- **One-purpose-only**: Each example should cover only one task. Of course, tasks such as image super-resolution and image modification share a similar modeling process, but we find that covering a single task per example is easier to understand.
We provide official examples covering the most representative diffusion-model tasks. *Official* examples are actively maintained by the `diffusers` maintainers, and we strive to strictly follow the example philosophy above. If you feel another example is absolutely needed, you are more than welcome to open a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or, even better, a [Pull Request](https://github.com/huggingface/diffusers/compare). We're always happy to help!
The training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. Currently we support:
- [Unconditional Training](./unconditional_training)
- [Text-to-Image Training](./text2image)
- [Text Inversion](./text_inversion)
- [Dreambooth](./dreambooth)
If possible, please install [xFormers](../optimization/xformers) for memory-efficient attention. It can speed up training and reduce memory pressure.
| Task | 🤗 Accelerate | 🤗 Datasets | Colab
|---|---|:---:|:---:|
| [**Unconditional Image Generation**](./unconditional_training) | ✅ | ✅ | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
| [**Text-to-Image fine-tuning**](./text2image) | ✅ | ✅ |
| [**Textual Inversion**](./text_inversion) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
| [**Dreambooth**](./dreambooth) | ✅ | - | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
| [**Training with LoRA**](./lora) | ✅ | - | - |
| [**ControlNet**](./controlnet) | ✅ | ✅ | - |
| [**InstructPix2Pix**](./instructpix2pix) | ✅ | ✅ | - |
| [**Custom Diffusion**](./custom_diffusion) | ✅ | ✅ | - |
## ์ปค๋ฎค๋ํฐ
๊ณต์ ์์ ์ธ์๋ **์ปค๋ฎค๋ํฐ ์์ ** ์ญ์ ์ ๊ณตํ๊ณ ์์ต๋๋ค. ํด๋น ์์ ๋ค์ ์ฐ๋ฆฌ์ ์ปค๋ฎค๋ํฐ์ ์ํด ๊ด๋ฆฌ๋ฉ๋๋ค. ์ปค๋ฎค๋ํฐ ์์ฉจ๋ ํ์ต ์์๋ ์ถ๋ก ํ์ดํ๋ผ์ธ์ผ๋ก ๊ตฌ์ฑ๋ ์ ์์ต๋๋ค. ์ด๋ฌํ ์ปค๋ฎค๋ํฐ ์์๋ค์ ๊ฒฝ์ฐ, ์์ ์ ์ํ๋ ์ฒ ํ๋ค์ ์ข ๋ ๊ด๋ํ๊ฒ ์ ์ฉํ๊ณ ์์ต๋๋ค. ๋ํ ์ด๋ฌํ ์ปค๋ฎค๋ํฐ ์์๋ค์ ๊ฒฝ์ฐ, ๋ชจ๋ ์ด์๋ค์ ๋ํ ์ ์ง๋ณด์๋ฅผ ๋ณด์ฅํ ์๋ ์์ต๋๋ค.
์ ์ฉํ๊ธด ํ์ง๋ง, ์์ง์ ๋์ค์ ์ด์ง ๋ชปํ๊ฑฐ๋ ์ ํฌ์ ์ฒ ํ์ ๋ถํฉํ์ง ์๋ ์์ ๋ค์ [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) ํด๋์ ๋ด๊ธฐ๊ฒ ๋ฉ๋๋ค.
**Note**: ์ปค๋ฎค๋ํฐ ์์ ๋ `diffusers`์ ๊ธฐ์ฌ(contribution)๋ฅผ ํฌ๋งํ๋ ๋ถ๋ค์๊ฒ [์์ฃผ ์ข์ ๊ธฐ์ฌ ์๋จ](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)์ด ๋ ์ ์์ต๋๋ค.
## Important note
To make sure you can successfully run the latest versions of the example scripts, you have to **install `diffusers` from source** and install the dependencies each example script requires. To do this, create a new virtual environment and run:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then `cd` into the folder of the example you want to run and execute:
```bash
pip install -r requirements.txt
```
hf_public_repos/diffusers/docs/source/ko/training/unconditional_training.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Unconditional image generation
Unlike text-to-image or image-to-image models, unconditional image generation is not conditioned on text or images; it only generates images that resemble the distribution of its training data.
<iframe
src="https://stevhliu-ddpm-butterflies-128.hf.space"
frameborder="0"
width="850"
height="550"
></iframe>
This guide explains how to train an unconditional image generation model on existing datasets as well as on your own custom dataset. If you want to know more about the training details, all the training scripts for unconditional image generation can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation).
Before running the scripts, make sure to install the dependencies:
```bash
pip install diffusers[training] accelerate datasets
```
Then initialize a 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment:
```bash
accelerate config
```
To initialize a default 🤗 [Accelerate](https://github.com/huggingface/accelerate/) environment without any custom configuration:
```bash
accelerate config default
```
Or, if your environment doesn't support an interactive shell, like a notebook, you can try:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
## Uploading the model to the Hub
You can upload your model to the Hub by adding the following argument to the training script:
```bash
--push_to_hub
```
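After a run with `--push_to_hub`, the uploaded pipeline can be loaded back like any other Hub model. A minimal sketch, assuming a hypothetical `your-username/ddpm-ema-flowers-64` repository (substitute the repo id created by `--push_to_hub`):

```py
from diffusers import DiffusionPipeline

# Hypothetical repo id; replace it with the repository created by --push_to_hub.
pipeline = DiffusionPipeline.from_pretrained("your-username/ddpm-ema-flowers-64")
image = pipeline().images[0]
```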
## Saving and loading checkpoints
It is a good idea to save checkpoints regularly in case anything goes wrong during training. To save a checkpoint, pass the following argument to the training script:
```bash
--checkpointing_steps=500
```
The full training state is saved in a subfolder of `output_dir` every 500 steps. You can load a checkpoint and resume training by passing the `--resume_from_checkpoint` argument to the training script:
```bash
--resume_from_checkpoint="checkpoint-1500"
```
## Fine-tuning
You're ready to launch the training script now! Specify the dataset to fine-tune on with the `--dataset_name` argument, and the path to save to with the `--output_dir` argument. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
The training script creates a `diffusion_pytorch_model.bin` file and saves it in your repository.
<Tip>
💡 A full training run takes 2 hours on 4xV100 GPUs.
</Tip>
For example, to fine-tune on the [Oxford Flowers](https://huggingface.co/datasets/huggan/flowers-102-categories) dataset:
```bash
accelerate launch train_unconditional.py \
--dataset_name="huggan/flowers-102-categories" \
--resolution=64 \
--output_dir="ddpm-ema-flowers-64" \
--train_batch_size=16 \
--num_epochs=100 \
--gradient_accumulation_steps=1 \
--learning_rate=1e-4 \
--lr_warmup_steps=500 \
--mixed_precision=no \
--push_to_hub
```
<div class="flex justify-center">
<img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png"/>
</div>
Or on the [Pokemon](https://huggingface.co/datasets/huggan/pokemon) dataset:
```bash
accelerate launch train_unconditional.py \
--dataset_name="huggan/pokemon" \
--resolution=64 \
--output_dir="ddpm-ema-pokemon-64" \
--train_batch_size=16 \
--num_epochs=100 \
--gradient_accumulation_steps=1 \
--learning_rate=1e-4 \
--lr_warmup_steps=500 \
--mixed_precision=no \
--push_to_hub
```
<div class="flex justify-center">
<img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png"/>
</div>
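When the run completes, you can sample from the saved pipeline directly. A minimal sketch, assuming the `ddpm-ema-pokemon-64` output directory from the command above:

```py
from diffusers import DDPMPipeline

# Load the pipeline saved by the training script (a local path or a Hub repo id).
pipeline = DDPMPipeline.from_pretrained("ddpm-ema-pokemon-64").to("cuda")

# Unconditional generation takes no prompt; just ask for images.
image = pipeline(batch_size=1).images[0]
image.save("pokemon-sample.png")
```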
### Training with multiple GPUs
`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:
```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \
--dataset_name="huggan/pokemon" \
--resolution=64 --center_crop --random_flip \
--output_dir="ddpm-ema-pokemon-64" \
--train_batch_size=16 \
--num_epochs=100 \
--gradient_accumulation_steps=1 \
--use_ema \
--learning_rate=1e-4 \
--lr_warmup_steps=500 \
--mixed_precision="fp16" \
--logger="wandb" \
--push_to_hub
```
hf_public_repos/diffusers/docs/source/ko/training/dreambooth.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# DreamBooth
[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3-5) images of a subject. It allows the model to generate contextualized images of the subject in different scenes, poses, and views.

<small>DreamBooth examples from the <a href="https://dreambooth.github.io">project's blog</a>.</small>
This guide shows you how to fine-tune the [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) model with DreamBooth for various GPU sizes and with Flax. If you're interested in digging deeper into how things work, all the DreamBooth training scripts used in this guide can be found [here](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth).
Before running the scripts, make sure to install the library's training dependencies. We also recommend installing 🧨 Diffusers from the `main` GitHub branch:
```bash
pip install git+https://github.com/huggingface/diffusers
pip install -U -r diffusers/examples/dreambooth/requirements.txt
```
xFormers is not a hard requirement for training, but we recommend [installing](../optimization/xformers) it if you can, because it can make training faster and less memory intensive.
After all the dependencies are set up, initialize a [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
To set up a default 🤗 Accelerate environment without any custom configuration, run:
```bash
accelerate config default
```
Or, if your current environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
## Fine-tuning
<Tip warning={true}>
DreamBooth fine-tuning is very sensitive to hyperparameters and easy to overfit. We recommend taking a look at this [in-depth analysis](https://huggingface.co/blog/dreambooth), which includes recommended settings to help you pick appropriate hyperparameters.
</Tip>
<frameworkcontent>
<pt>
Let's try DreamBooth with [these few images of a dog](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ). Download them, save them to a directory, and then set the `INSTANCE_DIR` environment variable to that path:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export OUTPUT_DIR="path_to_saved_model"
```
Then you can launch the training script with the following command (you can find the full training script [here](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)):
```bash
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--instance_prompt="a photo of sks dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=400
```
</pt>
<jax>
If you have access to TPUs or want to train even faster, you can try out the [Flax training script](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py). The Flax training script doesn't support gradient checkpointing or gradient accumulation, so you'll need a GPU with at least 30GB of memory.
Before running the script, make sure the requirements are installed:
```bash
pip install -U -r requirements.txt
```
Then you can launch the training script with the following command:
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="path-to-instance-images"
export OUTPUT_DIR="path-to-save-model"
python train_dreambooth_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--output_dir=$OUTPUT_DIR \
--instance_prompt="a photo of sks dog" \
--resolution=512 \
--train_batch_size=1 \
--learning_rate=5e-6 \
--max_train_steps=400
```
</jax>
</frameworkcontent>
### Fine-tuning with prior-preserving loss
Prior preservation is used to avoid overfitting and language drift (check out the [paper](https://arxiv.org/abs/2208.12242) if you're interested). For prior preservation, other images of the same class are used as part of the training process. The nice thing is that you can generate those images with the Stable Diffusion model itself! The training script saves the generated images to a local path you specify.
According to the authors, it's recommended to generate `num_epochs * num_samples` images for prior preservation. In most cases, 200-300 images work well.
<frameworkcontent>
<pt>
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export CLASS_DIR="path_to_class_images"
export OUTPUT_DIR="path_to_saved_model"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
</pt>
<jax>
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
python train_dreambooth_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--learning_rate=5e-6 \
--num_class_images=200 \
--max_train_steps=800
```
</jax>
</frameworkcontent>
## Fine-tuning the text encoder and UNet
The script also lets you fine-tune the `text_encoder` along with the `unet`. In our experiments (check out the [Training Stable Diffusion with DreamBooth using 🧨 Diffusers](https://huggingface.co/blog/dreambooth) post for details), this yields much better results, especially when generating images of faces.
<Tip warning={true}>
Training the text encoder requires additional memory and won't fit on a 16GB GPU. You need at least 24GB of VRAM to use this option.
</Tip>
Pass the `--train_text_encoder` argument to the training script to fine-tune the `text_encoder` and `unet`:
<frameworkcontent>
<pt>
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export CLASS_DIR="path_to_class_images"
export OUTPUT_DIR="path_to_saved_model"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_text_encoder \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
  --use_8bit_adam \
--gradient_checkpointing \
--learning_rate=2e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
</pt>
<jax>
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
python train_dreambooth_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_text_encoder \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--learning_rate=2e-6 \
--num_class_images=200 \
--max_train_steps=800
```
</jax>
</frameworkcontent>
## Fine-tuning with LoRA
You can also use LoRA (Low-Rank Adaptation of Large Language Models), a fine-tuning technique for speeding up the training of large models, with DreamBooth. For details, take a look at the [LoRA training](training/lora#dreambooth) guide.
### Saving checkpoints while training
It's easy to overfit while training with DreamBooth, so it's sometimes useful to save regular checkpoints during the run. One of the intermediate checkpoints may even work better than the final model! To enable checkpointing, pass the following argument to the training script:
```bash
--checkpointing_steps=500
```
This saves the full training state in subfolders of `output_dir`. Subfolder names start with the prefix `checkpoint-` followed by the number of steps performed so far; for example, `checkpoint-1500` is a checkpoint saved after 1500 training steps.
#### Resuming training from a saved checkpoint
To resume training from a saved checkpoint, pass the `--resume_from_checkpoint` argument along with the name of the checkpoint to use. You can also use the special string `"latest"` to resume from the most recently saved checkpoint (the one with the largest number of steps). For example, the following resumes training from the checkpoint saved after 1500 steps:
```bash
--resume_from_checkpoint="checkpoint-1500"
```
You can tweak some of the hyperparameters if you want.
#### Performing inference from a saved checkpoint
Saved checkpoints are stored in a format suitable for resuming training: they include not only the model weights but also the optimizer, data loader, and learning-rate states.
If you have **`"accelerate>=0.16.0"`** installed, use the following code to run inference from an intermediate checkpoint:
```python
from diffusers import DiffusionPipeline, UNet2DConditionModel
from transformers import CLIPTextModel
import torch

# Load the pipeline with the same arguments (model, revision) that were used for training.
model_id = "CompVis/stable-diffusion-v1-4"

unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet")

# If you trained with `args.train_text_encoder`, make sure to also load the text encoder.
text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder")

pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, torch_dtype=torch.float16)
pipeline.to("cuda")

# Perform inference, save the pipeline, or push it to the Hub.
pipeline.save_pretrained("dreambooth-pipeline")
```
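Continuing from the snippet above, you can then generate images as usual, reusing the `sks` identifier from training:

```python
prompt = "A photo of sks dog in a bucket"
image = pipeline(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```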
If you have **`"accelerate<0.16.0"`** installed, you need to convert it to an inference pipeline first:
```python
from accelerate import Accelerator
from diffusers import DiffusionPipeline

# Load the pipeline with the same arguments (model, revision) that were used for training.
model_id = "CompVis/stable-diffusion-v1-4"
pipeline = DiffusionPipeline.from_pretrained(model_id)

accelerator = Accelerator()

# Use text_encoder if `--train_text_encoder` was used for the initial training.
unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder)

# Restore the state from a checkpoint path. You have to use the absolute path here.
accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100")

# Rebuild the pipeline with the unwrapped models (assigning to .unet and .text_encoder should also work).
pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    unet=accelerator.unwrap_model(unet),
    text_encoder=accelerator.unwrap_model(text_encoder),
)

# Perform inference, save the pipeline, or push it to the Hub.
pipeline.save_pretrained("dreambooth-pipeline")
```
## Optimizations for different GPU sizes
Depending on your hardware, there are a few different ways to optimize DreamBooth on GPUs from 16GB all the way down to 8GB!
### xFormers
[xFormers](https://github.com/facebookresearch/xformers) is a toolbox for optimizing Transformers, and it includes the [memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) mechanism used in 🧨 Diffusers. [Install xFormers](./optimization/xformers) and then add the following argument to your training script:
```bash
--enable_xformers_memory_efficient_attention
```
xFormers is not available for Flax.
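If you're writing your own training loop rather than using the script, the same optimization is exposed as a method on the model itself. A minimal sketch, assuming xFormers is installed:

```python
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet"
)
# Swap the attention implementation for xFormers' memory-efficient kernels.
unet.enable_xformers_memory_efficient_attention()
```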
### Set gradients to `None`
Another way to lower memory usage is to [set the gradients](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html) to `None` instead of zero. However, this may change certain behaviors, so if you run into any issues, try removing this argument. Pass the following argument to the training script to set gradients to `None`:
```bash
--set_grads_to_none
```
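Under the hood, this flag corresponds to PyTorch's `set_to_none` option on `Optimizer.zero_grad`. A minimal sketch of the behavior outside the training script:

```python
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# Instead of filling the gradient tensors with zeros, drop them entirely;
# this skips a memory write and lets the allocator reclaim the buffers.
optimizer.zero_grad(set_to_none=True)
```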
### 16GB GPU
With the help of gradient checkpointing and [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)' 8-bit optimizer, it's possible to train DreamBooth on a 16GB GPU. Make sure bitsandbytes is installed:
```bash
pip install bitsandbytes
```
Then pass the `--use_8bit_adam` option to the training script:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export CLASS_DIR="path_to_class_images"
export OUTPUT_DIR="path_to_saved_model"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=2 --gradient_checkpointing \
--use_8bit_adam \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
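For reference, the `--use_8bit_adam` flag swaps the regular optimizer for bitsandbytes' 8-bit Adam. Roughly, and only as a sketch rather than the script's exact code, that amounts to:

```python
import bitsandbytes as bnb
import torch

model = torch.nn.Linear(4, 4)
# 8-bit Adam keeps the optimizer state quantized, cutting its memory footprint.
optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=5e-6)
```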
### 12GB GPU
To run DreamBooth on a 12GB GPU, enable gradient checkpointing, the 8-bit optimizer, and xFormers, and set the gradients to `None`:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 --gradient_checkpointing \
--use_8bit_adam \
--enable_xformers_memory_efficient_attention \
--set_grads_to_none \
--learning_rate=2e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800
```
### Training on an 8GB GPU
For 8GB GPUs, you can use [DeepSpeed](https://www.deepspeed.ai/) to offload some tensors from VRAM to the CPU or NVME, making it possible to train with less GPU memory.
Run the following command to configure your 🤗 Accelerate environment:
```bash
accelerate config
```
During configuration, confirm that you want to use DeepSpeed. Combining DeepSpeed stage 2, fp16 mixed precision, and offloading both the model parameters and the optimizer state to the CPU makes it possible to train with less than 8GB of VRAM. The drawback is that this requires more system RAM (about 25GB). See the [DeepSpeed documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more configuration options.
You should also change the default Adam optimizer to DeepSpeed's optimized version of Adam, [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu), for a substantial speedup. Enabling `DeepSpeedCPUAdam` requires your system's CUDA toolchain version to be the same as the one installed with PyTorch.
8-bit optimizers don't currently seem to be compatible with DeepSpeed.
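As a sketch only, swapping in the DeepSpeed CPU Adam in a custom loop would look roughly like this (assuming DeepSpeed is installed and built against the same CUDA toolchain as PyTorch):

```python
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam

model = torch.nn.Linear(4, 4)
# A CPU-side Adam with SIMD-optimized updates, meant to pair with ZeRO CPU offload.
optimizer = DeepSpeedCPUAdam(model.parameters(), lr=5e-6)
```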
Launch training with the following command:
```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path_to_training_images"
export CLASS_DIR="path_to_class_images"
export OUTPUT_DIR="path_to_saved_model"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir=$CLASS_DIR \
--output_dir=$OUTPUT_DIR \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="a photo of sks dog" \
--class_prompt="a photo of dog" \
--resolution=512 \
--train_batch_size=1 \
--sample_batch_size=1 \
--gradient_accumulation_steps=1 --gradient_checkpointing \
--learning_rate=5e-6 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--max_train_steps=800 \
--mixed_precision=fp16
```
## Inference
Once you have trained a model, specify the path where the model was saved and use it for inference with the [`StableDiffusionPipeline`]. Make sure your prompt includes the special `identifier` used during training (`sks` in the previous examples).
With the final model saved by the training script, you can run inference like this:
```python
from diffusers import StableDiffusionPipeline
import torch
model_id = "path_to_saved_model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```
You can also run inference from any of the [saved training checkpoints](#inference-from-a-saved-checkpoint).