text stringlengths 0 5.54k |
|---|
>>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline |
>>> from diffusers.utils import pt_to_pil |
>>> import torch |
>>> from PIL import Image |
>>> import requests |
>>> from io import BytesIO |
>>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" |
>>> response = requests.get(url) |
>>> original_image = Image.open(BytesIO(response.content)).convert("RGB") |
>>> original_image = original_image |
>>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" |
>>> response = requests.get(url) |
>>> mask_image = Image.open(BytesIO(response.content)) |
>>> mask_image = mask_image |
>>> pipe = IFInpaintingPipeline.from_pretrained( |
... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 |
... ) |
>>> pipe.enable_model_cpu_offload() |
>>> prompt = "blue sunglasses" |
>>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) |
>>> image = pipe( |
... image=original_image, |
... mask_image=mask_image, |
... prompt_embeds=prompt_embeds, |
... negative_prompt_embeds=negative_embeds, |
... output_type="pt", |
... ).images |
>>> # save intermediate image |
>>> pil_image = pt_to_pil(image) |
>>> pil_image[0].save("./if_stage_I.png") |
>>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( |
... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 |
... ) |
>>> super_res_1_pipe.enable_model_cpu_offload() |
>>> image = super_res_1_pipe( |
... image=image, |
... mask_image=mask_image, |
... original_image=original_image, |
... prompt_embeds=prompt_embeds, |
... negative_prompt_embeds=negative_embeds, |
... ).images |
>>> image[0].save("./if_stage_II.png") |
enable_model_cpu_offload |
< |
source |
> |
( |
gpu_id = 0 |
) |
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared |
to enable_sequential_cpu_offload, this method moves one whole model at a time to the GPU when its forward |
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with |
enable_sequential_cpu_offload, but performance is much better due to the iterative execution of the unet. |
enable_sequential_cpu_offload |
< |
source |
> |
( |
gpu_id = 0 |
) |
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline’s |
models have their state dicts saved to CPU and then are moved to a torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. |
encode_prompt |
< |
source |
> |
( |
prompt |
do_classifier_free_guidance = True |
num_images_per_prompt = 1 |
device = None |
negative_prompt = None |
prompt_embeds: typing.Optional[torch.FloatTensor] = None |
negative_prompt_embeds: typing.Optional[torch.FloatTensor] = None |
clean_caption: bool = False |
) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.