text stringlengths 0 5.54k |
|---|
) |
stage_2.enable_model_cpu_offload() |
# stage 3 |
safety_modules = { |
"feature_extractor": stage_1.feature_extractor, |
"safety_checker": stage_1.safety_checker, |
"watermarker": stage_1.watermarker, |
} |
stage_3 = DiffusionPipeline.from_pretrained( |
"stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16 |
) |
stage_3.enable_model_cpu_offload() |
prompt = "blue sunglasses" |
generator = torch.manual_seed(1) |
# text embeds |
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt) |
# stage 1 |
stage_1_output = stage_1( |
image=original_image, |
mask_image=mask_image, |
prompt_embeds=prompt_embeds, |
negative_prompt_embeds=negative_embeds, |
generator=generator, |
output_type="pt", |
).images |
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png") |
# stage 2 |
stage_2_output = stage_2( |
image=stage_1_output, |
original_image=original_image, |
mask_image=mask_image, |
prompt_embeds=prompt_embeds, |
negative_prompt_embeds=negative_embeds, |
generator=generator, |
output_type="pt", |
).images |
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png") |
# stage 3 |
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=100).images |
#stage_3_output[0].save("./if_stage_III.png") |
make_image_grid([original_image, mask_image, pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=5) Converting between different pipelines In addition to being loaded with from_pretrained, Pipelines can also be loaded directly from each other. Copied from diffusers import IFPipeline, IFSuperResolutionPipeline |
pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0") |
pipe_2 = IFSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0") |
from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline |
pipe_1 = IFImg2ImgPipeline(**pipe_1.components) |
pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components) |
from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline |
pipe_1 = IFInpaintingPipeline(**pipe_1.components) |
pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components) Optimizing for speed The simplest optimization to run IF faster is to move all model components to the GPU. Copied pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
pipe.to("cuda") You can also run the diffusion process for a shorter number of timesteps. This can either be done with the num_inference_steps argument: Copied pipe("<prompt>", num_inference_steps=30) Or with the timesteps argument: Copied from diffusers.pipelines.deepfloyd_if import fast27_timesteps |
pipe("<prompt>", timesteps=fast27_timesteps) When doing image variation or inpainting, you can also decrease the number of timesteps |
with the strength argument. The strength argument is the amount of noise to add to the input image which also determines how many steps to run in the denoising process. |
A smaller number will vary the image less but run faster. Copied pipe = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
pipe.to("cuda") |
image = pipe(image=image, prompt="<prompt>", strength=0.3).images You can also use torch.compile. Note that we have not exhaustively tested torch.compile |
with IF and it might not give expected results. Copied from diffusers import DiffusionPipeline |
import torch |
pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
pipe.to("cuda") |
pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True) |
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) Optimizing for memory When optimizing for GPU memory, we can use the standard diffusers CPU offloading APIs. Either the model based CPU offloading, Copied pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
pipe.enable_model_cpu_offload() or the more aggressive layer based CPU offloading. Copied pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
pipe.enable_sequential_cpu_offload() Additionally, T5 can be loaded in 8bit precision Copied from transformers import T5EncoderModel |
text_encoder = T5EncoderModel.from_pretrained( |
"DeepFloyd/IF-I-XL-v1.0", subfolder="text_encoder", device_map="auto", load_in_8bit=True, variant="8bit" |
) |
from diffusers import DiffusionPipeline |
pipe = DiffusionPipeline.from_pretrained( |
"DeepFloyd/IF-I-XL-v1.0", |
text_encoder=text_encoder, # pass the previously instantiated 8bit text encoder |
unet=None, |
device_map="auto", |
) |
prompt_embeds, negative_embeds = pipe.encode_prompt("<prompt>") For CPU RAM constrained machines like Google Colab free tier where we can’t load all model components to the CPU at once, we can manually only load the pipeline with |
the text encoder or UNet when the respective model components are needed. Copied from diffusers import IFPipeline, IFSuperResolutionPipeline |
import torch |
import gc |
from transformers import T5EncoderModel |
from diffusers.utils import pt_to_pil, make_image_grid |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.