text stringlengths 0 5.54k |
|---|
prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, generator=generator, output_type="pt" |
).images |
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png") |
# stage 2 |
stage_2_output = stage_2( |
image=stage_1_output, |
prompt_embeds=prompt_embeds, |
negative_prompt_embeds=negative_embeds, |
generator=generator, |
output_type="pt", |
).images |
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png") |
# stage 3 |
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, noise_level=100, generator=generator).images |
#stage_3_output[0].save("./if_stage_III.png") |
make_image_grid([pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=3) Text Guided Image-to-Image Generation The same IF model weights can be used for text-guided image-to-image translation or image variation. |
In this case just make sure to load the weights using the IFImg2ImgPipeline and IFImg2ImgSuperResolutionPipeline pipelines. Note: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines |
without loading them twice by making use of the components argument as explained here. Copied from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline |
from diffusers.utils import pt_to_pil, load_image, make_image_grid |
import torch |
# download image |
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" |
original_image = load_image(url) |
original_image = original_image.resize((768, 512)) |
# stage 1 |
stage_1 = IFImg2ImgPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
stage_1.enable_model_cpu_offload() |
# stage 2 |
stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained( |
"DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 |
) |
stage_2.enable_model_cpu_offload() |
# stage 3 |
safety_modules = { |
"feature_extractor": stage_1.feature_extractor, |
"safety_checker": stage_1.safety_checker, |
"watermarker": stage_1.watermarker, |
} |
stage_3 = DiffusionPipeline.from_pretrained( |
"stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16 |
) |
stage_3.enable_model_cpu_offload() |
prompt = "A fantasy landscape in style minecraft" |
generator = torch.manual_seed(1) |
# text embeds |
prompt_embeds, negative_embeds = stage_1.encode_prompt(prompt) |
# stage 1 |
stage_1_output = stage_1( |
image=original_image, |
prompt_embeds=prompt_embeds, |
negative_prompt_embeds=negative_embeds, |
generator=generator, |
output_type="pt", |
).images |
#pt_to_pil(stage_1_output)[0].save("./if_stage_I.png") |
# stage 2 |
stage_2_output = stage_2( |
image=stage_1_output, |
original_image=original_image, |
prompt_embeds=prompt_embeds, |
negative_prompt_embeds=negative_embeds, |
generator=generator, |
output_type="pt", |
).images |
#pt_to_pil(stage_2_output)[0].save("./if_stage_II.png") |
# stage 3 |
stage_3_output = stage_3(prompt=prompt, image=stage_2_output, generator=generator, noise_level=100).images |
#stage_3_output[0].save("./if_stage_III.png") |
make_image_grid([original_image, pt_to_pil(stage_1_output)[0], pt_to_pil(stage_2_output)[0], stage_3_output[0]], rows=1, cols=4) Text Guided Inpainting Generation The same IF model weights can be used for text-guided inpainting generation. |
In this case just make sure to load the weights using the IFInpaintingPipeline and IFInpaintingSuperResolutionPipeline pipelines. Note: You can also directly move the weights of the text-to-image pipelines to the image-to-image pipelines |
without loading them twice by making use of the ~DiffusionPipeline.components() function as explained here. Copied from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline |
from diffusers.utils import pt_to_pil, load_image, make_image_grid |
import torch |
# download image |
url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" |
original_image = load_image(url) |
# download mask |
url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" |
mask_image = load_image(url) |
# stage 1 |
stage_1 = IFInpaintingPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) |
stage_1.enable_model_cpu_offload() |
# stage 2 |
stage_2 = IFInpaintingSuperResolutionPipeline.from_pretrained( |
"DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.