```py
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
upscaler.enable_model_cpu_offload()
upscaler.enable_xformers_memory_efficient_attention()

image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0]
```

Finally, chain it to a super-resolution pipeline to further enhance the resolution:

```py
from diffusers import StableDiffusionUpscalePipeline

super_res = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
super_res.enable_model_cpu_offload()
super_res.enable_xformers_memory_efficient_attention()

image_3 = super_res(prompt, image=image_2).images[0]
make_image_grid([init_image, image_3.resize((512, 512))], rows=1, cols=2)
```

## Control image generation

Trying to generate an image that looks exactly the way you want can be difficult, which is why controlled generation techniques and models are so useful. While you can use the `negative_prompt` to partially control image generation, there are more robust methods like prompt weighting and ControlNets.

### Prompt weighting

Prompt weighting allows you to scale the representation of each concept in a prompt. For example, in a prompt like "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", you can choose to increase or decrease the embeddings of "astronaut" and "jungle". The Compel library provides a simple syntax for adjusting prompt weights and generating the embeddings. You can learn how to create the embeddings in the Prompt weighting guide.

AutoPipelineForImage2Image has a `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter where you can pass the embeddings, which replaces the `prompt` parameter.

```py
from diffusers import AutoPipelineForImage2Image
import torch

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()

image = pipeline(
    prompt_embeds=prompt_embeds,  # generated from Compel
    negative_prompt_embeds=negative_prompt_embeds,  # generated from Compel
    image=init_image,
).images[0]
```
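The `prompt_embeds` passed above can be created with the Compel library; a minimal sketch, assuming Compel is installed (`pip install compel`) and reusing the tokenizer and text encoder of the pipeline loaded above (the example prompts and the `++` weights are illustrative, see the Prompt weighting guide for the full syntax):

```py
from compel import Compel

# assumes the `pipeline` loaded above; "++" increases the weight of the preceding word
compel = Compel(tokenizer=pipeline.tokenizer, text_encoder=pipeline.text_encoder)
prompt_embeds = compel("Astronaut in a jungle++, cold color palette, muted colors, detailed, 8k")
negative_prompt_embeds = compel("ugly, deformed, disfigured")
```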
### ControlNet

ControlNets provide a more flexible and accurate way to control image generation because you can use an additional conditioning image. The conditioning image can be a canny image, depth map, image segmentation, and even scribbles! Whatever type of conditioning image you choose, the ControlNet generates an image that preserves the information in it.

For example, let's condition an image with a depth map to keep the spatial information in the image.

```py
from diffusers.utils import load_image, make_image_grid
# prepare image
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
init_image = load_image(url)
init_image = init_image.resize((958, 960))  # resize to depth image dimensions

depth_image = load_image("https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png")
make_image_grid([init_image, depth_image], rows=1, cols=2)
```
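If you don't have a ready-made depth map, one way to derive it from the initial image is with the transformers depth-estimation pipeline; a minimal sketch, assuming transformers is installed and accepting whatever default depth model the pipeline downloads:

```py
from transformers import pipeline as transformers_pipeline

# estimate a depth map from the initial image (downloads the pipeline's default depth model)
depth_estimator = transformers_pipeline("depth-estimation")
depth_image = depth_estimator(init_image)["depth"].convert("RGB")
```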
Load a ControlNet model conditioned on depth maps and the AutoPipelineForImage2Image:

```py
from diffusers import ControlNetModel, AutoPipelineForImage2Image
import torch

controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipeline.enable_model_cpu_offload()
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed
pipeline.enable_xformers_memory_efficient_attention()
```

Now generate a new image conditioned on the depth map, initial image, and prompt:

```py
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image_control_net = pipeline(prompt, image=init_image, control_image=depth_image).images[0]
make_image_grid([init_image, depth_image, image_control_net], rows=1, cols=3)
```

*initial image · depth image · ControlNet image*

Let's apply a new style to the image generated from the ControlNet by chaining it with an image-to-image pipeline:

```py
pipeline = AutoPipelineForImage2Image.from_pretrained(
"nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, |
) |
pipeline.enable_model_cpu_offload() |
# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed |
pipeline.enable_xformers_memory_efficient_attention() |
prompt = "elden ring style astronaut in a jungle" # include the token "elden ring style" in the prompt |
negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" |
image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net, strength=0.45, guidance_scale=10.5).images[0] |
make_image_grid([init_image, depth_image, image_control_net, image_elden_ring], rows=2, cols=2) Optimize Running diffusion models is computationally expensive and intensive, but with a few optimization tricks, it is entirely possible to run them on consumer and free-tier GPUs. For example, you can use a more memory-efficient form of attention such as PyTorch 2.0’s scaled-dot product attention or xFormers (you can use one or the other, but there’s no need to use both). You can also offload the model to the GPU while the other pipeline components wait on the CPU. Copied + pipeline.enable_model_cpu_offload() |
+ pipeline.enable_xformers_memory_efficient_attention()
```

With torch.compile, you can boost your inference speed even more by wrapping your UNet with it:

```py
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```

To learn more, take a look at the Reduce memory usage and Torch 2.0 guides.
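Putting these pieces together, a sketch of an optimized setup for the pipelines used in this guide, assuming PyTorch 2.0 or higher so xFormers isn't needed:

```py
import torch
from diffusers import AutoPipelineForImage2Image

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
# move each model to the GPU only when it is needed, keeping the rest on the CPU
pipeline.enable_model_cpu_offload()
# compile the UNet; the first call is slower while compilation happens
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```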
# DeepFloyd IF

## Overview

DeepFloyd IF is a novel state-of-the-art open-source text-to-image model with a high degree of photorealism and language understanding.
The model is modular, composed of a frozen text encoder and three cascaded pixel diffusion modules:

- Stage 1: a base model that generates a 64x64 px image based on a text prompt,
- Stage 2: a 64x64 px => 256x256 px super-resolution model, and
- Stage 3: a 256x256 px => 1024x1024 px super-resolution model
Stage 1 and Stage 2 utilize a frozen text encoder based on the T5 transformer to extract text embeddings, which are then fed into a UNet architecture enhanced with cross-attention and attention pooling. Stage 3 is Stability AI's x4 Upscaling model. The result is a highly efficient model that outperforms current state-of-the-art models, achieving a zero-shot FID score of 6.66 on the COCO dataset.
Our work underscores the potential of larger UNet architectures in the first stage of cascaded diffusion models and depicts a promising future for text-to-image synthesis.

## Usage

Before you can use IF, you need to accept its usage conditions. To do so:

1. Make sure to have a Hugging Face account and be logged in.
2. Accept the license on the model card of DeepFloyd/IF-I-XL-v1.0. Accepting the license on the stage I model card will auto accept for the other IF models.
3. Make sure to log in locally. Install huggingface_hub:

```sh
pip install huggingface_hub --upgrade
```

Run the login function in a Python shell:

```py
from huggingface_hub import login
login()
```

and enter your Hugging Face Hub access token.

Next we install diffusers and dependencies:

```sh
pip install -q diffusers accelerate transformers
```

The following sections give more in-detail examples of how to use IF. Specifically:

- Text-to-Image Generation
- Image-to-Image Generation
- Inpainting
- Reusing model weights
- Speed optimization
- Memory optimization

## Available checkpoints

**Stage-1**

- DeepFloyd/IF-I-XL-v1.0
- DeepFloyd/IF-I-L-v1.0
- DeepFloyd/IF-I-M-v1.0

**Stage-2**

- DeepFloyd/IF-II-L-v1.0
- DeepFloyd/IF-II-M-v1.0

**Stage-3**

- stabilityai/stable-diffusion-x4-upscaler

## Text-to-Image Generation

By default diffusers makes use of model CPU offloading to run the whole IF pipeline with as little as 14 GB of VRAM.

```py
from diffusers import DiffusionPipeline
from diffusers.utils import pt_to_pil, make_image_grid
import torch

# stage 1
stage_1 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()

# stage 2
stage_2 = DiffusionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16
)
stage_2.enable_model_cpu_offload()

# stage 3
safety_modules = {
    "feature_extractor": stage_1.feature_extractor,
    "safety_checker": stage_1.safety_checker,
    "watermarker": stage_1.watermarker,
}
stage_3 = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16
)
stage_3.enable_model_cpu_offload()

prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"'
generator = torch.manual_seed(1)

# text embeds
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt)

# stage 1
stage_1_output = stage_1(
    # output_type="pt" keeps the result as tensors so it can be passed to the next stage
    prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type="pt"
).images
```
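The stage 1 output can then be passed through the remaining pipelines loaded above; a sketch of that chaining, assuming the same generator and text embeddings (the `noise_level` value here is illustrative, not prescribed by this guide):

```py
# stage 2: 64x64 -> 256x256, reusing the text embeddings (sketch; argument choices are assumptions)
stage_2_output = stage_2(
    image=stage_1_output,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    generator=generator,
    output_type="pt",
).images

# stage 3: 256x256 -> 1024x1024 with the x4 upscaler
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, noise_level=100, generator=generator).images

make_image_grid([pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=3)
```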