# DiffEdit

[DiffEdit: Diffusion-based semantic image editing with mask guidance](https://huggingface.co/papers/2210.11427) is by Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord.

The abstract from the paper is:

*Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.*

The original codebase can be found at [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion), and you can try it out in this [demo](https://blog.problemsolversguild.com/technical/research/2022/11/02/DiffEdit-Implementation.html).

This pipeline was contributed by [clarencechen](https://github.com/clarencechen). ❤️
| <span class="hljs-meta">>>> </span>image = pipeline(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){g(n.$$.fragment)},l(i){u(n.$$.fragment,i)},m(i,w){h(n,i,w),m=!0},p:Je,i(i){m||(_(n.$$.fragment,i),m=!0)},o(i){b(n.$$.fragment,i),m=!1},d(i){y(n,i)}}}function bt(k){let n,m;return n=new He({props:{code:"aW1wb3J0JTIwUElMJTBBaW1wb3J0JTIwcmVxdWVzdHMlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBpbyUyMGltcG9ydCUyMEJ5dGVzSU8lMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uRGlmZkVkaXRQaXBlbGluZSUwQSUwQSUwQWRlZiUyMGRvd25sb2FkX2ltYWdlKHVybCklM0ElMEElMjAlMjAlMjAlMjByZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBJTIwJTIwJTIwJTIwcmV0dXJuJTIwUElMLkltYWdlLm9wZW4oQnl0ZXNJTyhyZXNwb25zZS5jb250ZW50KSkuY29udmVydCglMjJSR0IlMjIpJTBBJTBBJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGZ2l0aHViLmNvbSUyRlhpYW5nLWNkJTJGRGlmZkVkaXQtc3RhYmxlLWRpZmZ1c2lvbiUyRnJhdyUyRm1haW4lMkZhc3NldHMlMkZvcmlnaW4ucG5nJTIyJTBBJTBBaW5pdF9pbWFnZSUyMCUzRCUyMGRvd25sb2FkX2ltYWdlKGltZ191cmwpLnJlc2l6ZSgoNzY4JTJDJTIwNzY4KSklMEElMEFwaXBlbGluZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvbkRpZmZFZGl0UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi0yLTElMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBJTBBcGlwZWxpbmUuc2NoZWR1bGVyJTIwJTNEJTIwRERJTVNjaGVkdWxlci5mcm9tX2NvbmZpZyhwaXBlbGluZS5zY2hlZHVsZXIuY29uZmlnKSUwQXBpcGVsaW5lLmludmVyc2Vfc2NoZWR1bGVyJTIwJTNEJTIwRERJTUludmVyc2VTY2hlZHVsZXIuZnJvbV9jb25maWcocGlwZWxpbmUuc2NoZWR1bGVyLmNvbmZpZyklMEFwaXBlbGluZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjBib3dsJTIwb2YlMjBmcnVpdHMlMjIlMEElMEFpbnZlcnRlZF9sYXRlbnRzJTIwJTNEJTIwcGlwZWxpbmUuaW52ZXJ0KGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMHByb21wdCUzRHByb21wdCkubGF0ZW50cw==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> PIL | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionDiffEditPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">download_image</span>(<span class="hljs-params">url</span>): | |
| <span class="hljs-meta">... </span> response = requests.get(url) | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> PIL.Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"</span> | |
| <span class="hljs-meta">>>> </span>init_image = download_image(img_url).resize((<span class="hljs-number">768</span>, <span class="hljs-number">768</span>)) | |
| <span class="hljs-meta">>>> </span>pipeline = StableDiffusionDiffEditPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"stabilityai/stable-diffusion-2-1"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) | |
| <span class="hljs-meta">>>> </span>pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) | |
| <span class="hljs-meta">>>> </span>pipeline.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A bowl of fruits"</span> | |
| <span class="hljs-meta">>>> </span>inverted_latents = pipeline.invert(image=init_image, prompt=prompt).latents`,wrap:!1}}),{c(){g(n.$$.fragment)},l(i){u(n.$$.fragment,i)},m(i,w){h(n,i,w),m=!0},p:Je,i(i){m||(_(n.$$.fragment,i),m=!0)},o(i){b(n.$$.fragment,i),m=!1},d(i){y(n,i)}}}function yt(k){let n,m;return n=new He({props:{code:"aW1wb3J0JTIwUElMJTBBaW1wb3J0JTIwcmVxdWVzdHMlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBpbyUyMGltcG9ydCUyMEJ5dGVzSU8lMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uRGlmZkVkaXRQaXBlbGluZSUwQSUwQSUwQWRlZiUyMGRvd25sb2FkX2ltYWdlKHVybCklM0ElMEElMjAlMjAlMjAlMjByZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBJTIwJTIwJTIwJTIwcmV0dXJuJTIwUElMLkltYWdlLm9wZW4oQnl0ZXNJTyhyZXNwb25zZS5jb250ZW50KSkuY29udmVydCglMjJSR0IlMjIpJTBBJTBBJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGZ2l0aHViLmNvbSUyRlhpYW5nLWNkJTJGRGlmZkVkaXQtc3RhYmxlLWRpZmZ1c2lvbiUyRnJhdyUyRm1haW4lMkZhc3NldHMlMkZvcmlnaW4ucG5nJTIyJTBBJTBBaW5pdF9pbWFnZSUyMCUzRCUyMGRvd25sb2FkX2ltYWdlKGltZ191cmwpLnJlc2l6ZSgoNzY4JTJDJTIwNzY4KSklMEElMEFwaXBlbGluZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvbkRpZmZFZGl0UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJpbGl0eWFpJTJGc3RhYmxlLWRpZmZ1c2lvbi0yLTElMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBJTBBcGlwZWxpbmUuc2NoZWR1bGVyJTIwJTNEJTIwRERJTVNjaGVkdWxlci5mcm9tX2NvbmZpZyhwaXBlbGluZS5zY2hlZHVsZXIuY29uZmlnKSUwQXBpcGVsaW5lLmludmVyc2Vfc2NoZWR1bGVyJTIwJTNEJTIwRERJTUludmVyc2VTY2hlZHVsZXIuZnJvbV9jb25maWcocGlwZWxpbmUuc2NoZWR1bGVyLmNvbmZpZyklMEFwaXBlbGluZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQW1hc2tfcHJvbXB0JTIwJTNEJTIwJTIyQSUyMGJvd2wlMjBvZiUyMGZydWl0cyUyMiUwQXByb21wdCUyMCUzRCUyMCUyMkElMjBib3dsJTIwb2YlMjBwZWFycyUyMiUwQSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBwaXBlbGluZS5nZW5lcmF0ZV9tYXNrKGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMHNvdXJjZV9wcm9tcHQlM0Rwcm9tcHQlMkMlMjB0YXJnZXRfcHJvbXB0JTNEbWFza19wcm9tcHQpJTBBaW1hZ2VfbGF0ZW50cyUyMCUzRCUyMHBpcGVsaW5lLmludmVydChpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMjBwcm9tcHQlM0RtYXNrX3Byb21wdCkubGF0ZW50cyUwQWltYWdlJTIwJTNEJTIwcGlwZWxpbmUocHJvbXB0JTNEcHJvbXB0JTJDJTIwbWFza19pbWFnZSUzRG1hc2tfaW1hZ2UlMkMlMjBpbWFnZV9sYXRlbnRzJTNEaW1hZ2VfbGF0ZW50cykuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> PIL | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionDiffEditPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">download_image</span>(<span class="hljs-params">url</span>): | |
| <span class="hljs-meta">... </span> response = requests.get(url) | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> PIL.Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"</span> | |
| <span class="hljs-meta">>>> </span>init_image = download_image(img_url).resize((<span class="hljs-number">768</span>, <span class="hljs-number">768</span>)) | |
| <span class="hljs-meta">>>> </span>pipeline = StableDiffusionDiffEditPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"stabilityai/stable-diffusion-2-1"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) | |
| <span class="hljs-meta">>>> </span>pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) | |
| <span class="hljs-meta">>>> </span>pipeline.enable_model_cpu_offload() | |
| <span class="hljs-meta">>>> </span>mask_prompt = <span class="hljs-string">"A bowl of fruits"</span> | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A bowl of pears"</span> | |
| <span class="hljs-meta">>>> </span>mask_image = pipeline.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt) | |
| <span class="hljs-meta">>>> </span>image_latents = pipeline.invert(image=init_image, prompt=mask_prompt).latents | |
| <span class="hljs-meta">>>> </span>image = pipeline(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){g(n.$$.fragment)},l(i){u(n.$$.fragment,i)},m(i,w){h(n,i,w),m=!0},p:Je,i(i){m||(_(n.$$.fragment,i),m=!0)},o(i){b(n.$$.fragment,i),m=!1},d(i){y(n,i)}}}function vt(k){let n,m,i,w,$,ce,B,Ye='<a href="https://huggingface.co/papers/2210.11427" rel="nofollow">DiffEdit: Diffusion-based semantic image editing with mask guidance</a> is by Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord.',fe,L,Qe="The abstract from the paper is:",me,G,qe="<em>Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.</em>",ge,N,Ae='The original codebase can be found at <a href="https://github.com/Xiang-cd/DiffEdit-stable-diffusion" rel="nofollow">Xiang-cd/DiffEdit-stable-diffusion</a>, and you can try it out in this <a href="https://blog.problemsolversguild.com/technical/research/2022/11/02/DiffEdit-Implementation.html" rel="nofollow">demo</a>.',ue,R,Oe='This pipeline was contributed by <a href="https://github.com/clarencechen" rel="nofollow">clarencechen</a>. ❤️',he,V,_e,X,Ke=`<li>The pipeline can generate masks that can be fed into other inpainting pipelines.</li> <li>In order to generate an image using this pipeline, both an image mask (source and target prompts can be manually specified or generated, and passed to <a href="/docs/diffusers/pr_11477/en/api/pipelines/diffedit#diffusers.StableDiffusionDiffEditPipeline.generate_mask">generate_mask()</a>) | |
| and a set of partially inverted latents (generated using <a href="/docs/diffusers/pr_11477/en/api/pipelines/diffedit#diffusers.StableDiffusionDiffEditPipeline.invert">invert()</a>) <em>must</em> be provided as arguments when calling the pipeline to generate the final edited image.</li> <li>The function <a href="/docs/diffusers/pr_11477/en/api/pipelines/diffedit#diffusers.StableDiffusionDiffEditPipeline.generate_mask">generate_mask()</a> exposes two prompt arguments, <code>source_prompt</code> and <code>target_prompt</code> | |
| that let you control the locations of the semantic edits in the final image to be generated. Let’s say, | |
| you wanted to translate from “cat” to “dog”. In this case, the edit direction will be “cat -> dog”. To reflect | |
| this in the generated mask, you simply have to set the embeddings related to the phrases including “cat” to | |
| <code>source_prompt</code> and “dog” to <code>target_prompt</code>.</li> <li>When generating partially inverted latents using <code>invert</code>, assign a caption or text embedding describing the | |
| overall image to the <code>prompt</code> argument to help guide the inverse latent sampling process. In most cases, the | |
| source concept is sufficiently descriptive to yield good results, but feel free to explore alternatives.</li> <li>When calling the pipeline to generate the final edited image, assign the source concept to <code>negative_prompt</code> | |
| and the target concept to <code>prompt</code>. Taking the above example, you simply have to set the embeddings related to | |
| the phrases including “cat” to <code>negative_prompt</code> and “dog” to <code>prompt</code>.</li> <li>If you wanted to reverse the direction in the example above, i.e., “dog -> cat”, then it’s recommended to:<ul><li>Swap the <code>source_prompt</code> and <code>target_prompt</code> in the arguments to <code>generate_mask</code>.</li> <li>Change the input prompt in <a href="/docs/diffusers/pr_11477/en/api/pipelines/diffedit#diffusers.StableDiffusionDiffEditPipeline.invert">invert()</a> to include “dog”.</li> <li>Swap the <code>prompt</code> and <code>negative_prompt</code> in the arguments to call the pipeline to generate the final edited image.</li></ul></li> <li>The source and target prompts, or their corresponding embeddings, can also be automatically generated. Please refer to the <a href="../../using-diffusers/diffedit">DiffEdit</a> guide for more details.</li>`,be,z,ye,p,F,Ze,E,Ee,ee,et="Pipeline for text-guided image inpainting using Stable Diffusion and DiffEdit.",Se,te,tt=`This model inherits from <a href="/docs/diffusers/pr_11477/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods | |
| implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Ue,ne,nt="The pipeline also inherits the following loading and saving methods:",Pe,ie,it='<li><a href="/docs/diffusers/pr_11477/en/api/loaders/textual_inversion#diffusers.loaders.TextualInversionLoaderMixin.load_textual_inversion">load_textual_inversion()</a> for loading textual inversion embeddings</li> <li><a href="/docs/diffusers/pr_11477/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights">load_lora_weights()</a> for loading LoRA weights</li> <li><a href="/docs/diffusers/pr_11477/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights">save_lora_weights()</a> for saving LoRA weights</li>',je,D,H,Ce,se,st="Generate a latent mask given a mask prompt, a target prompt, and an image.",We,S,$e,T,Y,Be,oe,ot="Generate inverted latents given a prompt and image.",Le,U,Ge,x,Q,Ne,ae,at="The call function to the pipeline for generation.",Re,P,Ve,j,q,Xe,re,rt="Encodes the prompt into text encoder hidden states.",ve,A,we,M,O,ze,le,lt="Output class for Stable Diffusion pipelines.",ke,K,De,de,Te;return $=new Ie({props:{title:"DiffEdit",local:"diffedit",headingTag:"h1"}}),V=new Ie({props:{title:"Tips",local:"tips",headingTag:"h2"}}),z=new Ie({props:{title:"StableDiffusionDiffEditPipeline",local:"diffusers.StableDiffusionDiffEditPipeline",headingTag:"h2"}}),F=new pe({props:{name:"class diffusers.StableDiffusionDiffEditPipeline",anchor:"diffusers.StableDiffusionDiffEditPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": KarrasDiffusionSchedulers"},{name:"safety_checker",val:": StableDiffusionSafetyChecker"},{name:"feature_extractor",val:": CLIPImageProcessor"},{name:"inverse_scheduler",val:": DDIMInverseScheduler"},{name:"requires_safety_checker",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.StableDiffusionDiffEditPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIPTextModel</a>) — | |
| Frozen text-encoder (<a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a>).`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| A <code>CLIPTokenizer</code> to tokenize text.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.unet",description:`<strong>unet</strong> (<a href="/docs/diffusers/pr_11477/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) — | |
| A <code>UNet2DConditionModel</code> to denoise the encoded image latents.`,name:"unet"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11477/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.inverse_scheduler",description:`<strong>inverse_scheduler</strong> (<a href="/docs/diffusers/pr_11477/en/api/schedulers/ddim_inverse#diffusers.DDIMInverseScheduler">DDIMInverseScheduler</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to fill in the unmasked part of the input latents.`,name:"inverse_scheduler"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.safety_checker",description:`<strong>safety_checker</strong> (<code>StableDiffusionSafetyChecker</code>) — | |
| Classification module that estimates whether generated images could be considered offensive or harmful. | |
| Please refer to the <a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5" rel="nofollow">model card</a> for | |
| more details about a model’s potential harms.`,name:"safety_checker"},{anchor:"diffusers.StableDiffusionDiffEditPipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor" rel="nofollow">CLIPImageProcessor</a>) — | |
| A <code>CLIPImageProcessor</code> to extract features from generated images; used as inputs to the <code>safety_checker</code>.`,name:"feature_extractor"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py#L244"}}),E=new gt({props:{warning:!0,$$slots:{default:[ht]},$$scope:{ctx:k}}}),H=new pe({props:{name:"generate_mask",anchor:"diffusers.StableDiffusionDiffEditPipeline.generate_mask",parameters:[{name:"image",val:": typing.Union[torch.Tensor, PIL.Image.Image] = None"},{name:"target_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"target_negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"target_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"target_negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"source_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"source_negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"source_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"source_negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"num_maps_per_mask",val:": typing.Optional[int] = 10"},{name:"mask_encode_strength",val:": typing.Optional[float] = 0.5"},{name:"mask_thresholding_ratio",val:": typing.Optional[float] = 3.0"},{name:"num_inference_steps",val:": int = 50"},{name:"guidance_scale",val:": float = 7.5"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'np'"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionDiffEditPipeline.generate_mask.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code>) — | |
### generate_mask

`generate_mask(image=None, target_prompt=None, target_negative_prompt=None, target_prompt_embeds=None, target_negative_prompt_embeds=None, source_prompt=None, source_negative_prompt=None, source_prompt_embeds=None, source_negative_prompt_embeds=None, num_maps_per_mask=10, mask_encode_strength=0.5, mask_thresholding_ratio=3.0, num_inference_steps=50, guidance_scale=7.5, generator=None, output_type='np', cross_attention_kwargs=None)`

Generate a latent mask given a mask prompt, a target prompt, and an image.

Parameters:

- **image** (`PIL.Image.Image`) — `Image` or tensor representing an image batch to be used for computing the mask.
- **target_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide semantic mask generation. If not defined, you need to pass `prompt_embeds`.
- **target_negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what not to include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **target_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **target_negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **source_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide semantic mask generation using DiffEdit. If not defined, you need to pass `source_prompt_embeds` or `source_image` instead.
- **source_negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide semantic mask generation away from using DiffEdit. If not defined, you need to pass `source_negative_prompt_embeds` or `source_image` instead.
- **source_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `source_prompt` input argument.
- **source_negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `source_negative_prompt` input argument.
- **num_maps_per_mask** (`int`, *optional*, defaults to 10) — The number of noise maps sampled to generate the semantic mask using DiffEdit.
- **mask_encode_strength** (`float`, *optional*, defaults to 0.5) — The strength of the noise maps sampled to generate the semantic mask using DiffEdit. Must be between 0 and 1.
- **mask_thresholding_ratio** (`float`, *optional*, defaults to 3.0) — The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before mask binarization.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **guidance_scale** (`float`, *optional*, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **output_type** (`str`, *optional*, defaults to `"np"`) — The output format of the generated mask. Choose between `PIL.Image` or `np.array`.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the [AttnProcessor](/docs/diffusers/pr_11477/en/api/attnprocessor#diffusers.models.attention_processor.AttnProcessor) as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

**Returns:** `List[PIL.Image.Image]` or `np.array` — When returning a `List[PIL.Image.Image]`, the list consists of a batch of single-channel binary images with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`. If it's `np.array`, the shape is `(batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor)`.
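Since `generate_mask` returns an `np.array` by default, a quick way to inspect the mask is to convert it to a PIL image. A minimal sketch, assuming `mask_image` is the array returned by the call in the example below and contains binary 0/1 values:

```py
import numpy as np
from PIL import Image

# mask_image: np.array of shape (batch_size, height // 8, width // 8) with binary values
# (the vae_scale_factor is 8 for Stable Diffusion models).
mask_pil = Image.fromarray((mask_image[0] * 255).astype(np.uint8), mode="L")
mask_pil.save("mask.png")
```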
Example (note that `DDIMScheduler` and `DDIMInverseScheduler` are imported here; the original snippet used them without importing them):

```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO

>>> from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline


>>> def download_image(url):
...     response = requests.get(url)
...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"

>>> init_image = download_image(img_url).resize((768, 768))

>>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
... )

>>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
>>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
>>> pipeline.enable_model_cpu_offload()

>>> mask_prompt = "A bowl of fruits"
>>> prompt = "A bowl of pears"

>>> mask_image = pipeline.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt)
>>> image_latents = pipeline.invert(image=init_image, prompt=mask_prompt).latents
>>> image = pipeline(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0]
```

### invert

`invert(prompt=None, image=None, num_inference_steps=50, inpaint_strength=0.8, guidance_scale=7.5, negative_prompt=None, generator=None, prompt_embeds=None, negative_prompt_embeds=None, decode_latents=False, output_type='pil', return_dict=True, callback=None, callback_steps=1, cross_attention_kwargs=None, lambda_auto_corr=20.0, lambda_kl=20.0, num_reg_steps=0, num_auto_corr_rolls=5)`

Generate inverted latents given a prompt and image.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **image** (`PIL.Image.Image`) — `Image` or tensor representing an image batch to produce the inverted latents guided by `prompt`.
- **inpaint_strength** (`float`, *optional*, defaults to 0.8) — Indicates the extent of the noising process used for latent inversion. Must be between 0 and 1. When `inpaint_strength` is 1, the inversion process is run for the full number of iterations specified in `num_inference_steps`. `image` is used as a reference for the inversion process, and adding more noise increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **guidance_scale** (`float`, *optional*, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what not to include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **generator** (`torch.Generator`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **decode_latents** (`bool`, *optional*, defaults to `False`) — Whether or not to decode the inverted latents into a generated image. Setting this argument to `True` decodes all inverted latents for each timestep into a list of generated images.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `~pipelines.stable_diffusion.DiffEditInversionPipelineOutput` instead of a plain tuple.
- **callback** (`Callable`, *optional*) — A function called every `callback_steps` steps during inference with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
- **callback_steps** (`int`, *optional*, defaults to 1) — The frequency at which the `callback` function is called. If not specified, the callback is called at every step.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the [AttnProcessor](/docs/diffusers/pr_11477/en/api/attnprocessor#diffusers.models.attention_processor.AttnProcessor) as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **lambda_auto_corr** (`float`, *optional*, defaults to 20.0) — Lambda parameter to control auto correction.
- **lambda_kl** (`float`, *optional*, defaults to 20.0) — Lambda parameter to control Kullback-Leibler divergence output.
- **num_reg_steps** (`int`, *optional*, defaults to 0) — Number of regularization loss steps.
- **num_auto_corr_rolls** (`int`, *optional*, defaults to 5) — Number of auto correction roll steps.

**Returns:** `~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput` or `tuple` — If `return_dict` is `True`, a `DiffEditInversionPipelineOutput` is returned; otherwise a `tuple` is returned where the first element is the inverted latents tensors ordered by increasing noise, and the second is the corresponding decoded images if `decode_latents` is `True`, otherwise `None`.
Example (again with the scheduler imports added):

```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO

>>> from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline


>>> def download_image(url):
...     response = requests.get(url)
...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"

>>> init_image = download_image(img_url).resize((768, 768))

>>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
... )

>>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
>>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
>>> pipeline.enable_model_cpu_offload()

>>> prompt = "A bowl of fruits"

>>> inverted_latents = pipeline.invert(image=init_image, prompt=prompt).latents
```

### \_\_call\_\_

`__call__(prompt=None, mask_image=None, image_latents=None, inpaint_strength=0.8, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, output_type='pil', return_dict=True, callback=None, callback_steps=1, cross_attention_kwargs=None, clip_skip=None)`

The call function to the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **mask_image** (`PIL.Image.Image`) — `Image` or tensor representing an image batch to mask the generated image. White pixels in the mask are repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, 1, H, W)`.
- **image_latents** (`PIL.Image.Image` or `torch.Tensor`) — Partially noised image latents from the inversion process to be used as inputs for image generation.
- **inpaint_strength** (`float`, *optional*, defaults to 0.8) — Indicates the extent to inpaint the masked area. Must be between 0 and 1. When `inpaint_strength` is 1, the denoising process is run on the masked area for the full number of iterations specified in `num_inference_steps`. `image_latents` is used as a reference for the masked area, and adding more noise to a region increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **guidance_scale** (`float`, *optional*, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what not to include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **eta** (`float`, *optional*, defaults to 0.0) — Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [DDIMScheduler](/docs/diffusers/pr_11477/en/api/schedulers/ddim#diffusers.DDIMScheduler), and is ignored in other schedulers.
- **generator** (`torch.Generator`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a [StableDiffusionPipelineOutput](/docs/diffusers/pr_11477/en/api/pipelines/stable_diffusion/latent_upscale#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) instead of a plain tuple.
- **callback** (`Callable`, *optional*) — A function called every `callback_steps` steps during inference with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
- **callback_steps** (`int`, *optional*, defaults to 1) — The frequency at which the `callback` function is called. If not specified, the callback is called at every step.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.

**Returns:** [StableDiffusionPipelineOutput](/docs/diffusers/pr_11477/en/api/pipelines/stable_diffusion/latent_upscale#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput) or `tuple` — If `return_dict` is `True`, a `StableDiffusionPipelineOutput` is returned; otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.
Example: see the end-to-end example under `generate_mask()` above; the same snippet generates the mask, inverts the latents, and then calls the pipeline to produce the final edited image.

### encode_prompt

`encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=None, clip_skip=None)`

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt to be encoded.
- **device** (`torch.device`) — The torch device.
- **num_images_per_prompt** (`int`) — The number of images that should be generated per prompt.
- **do_classifier_free_guidance** (`bool`) — Whether to use classifier-free guidance or not.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
- **lora_scale** (`float`, *optional*) — A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
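A minimal sketch of precomputing embeddings with `encode_prompt` and reusing them in the pipeline call. It assumes a CUDA machine, a `pipeline` plus `mask_image` and `image_latents` from the earlier examples, and that this version of diffusers returns a `(prompt_embeds, negative_prompt_embeds)` pair (older releases returned a single concatenated tensor):

```py
import torch

# Precompute the embeddings once and reuse them across calls.
prompt_embeds, negative_prompt_embeds = pipeline.encode_prompt(
    prompt="A bowl of pears",
    device=torch.device("cuda"),  # assumes a CUDA execution device
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="A bowl of fruits",
)

image = pipeline(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    mask_image=mask_image,
    image_latents=image_latents,
).images[0]
```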
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py#L420"}}),A=new Ie({props:{title:"StableDiffusionPipelineOutput",local:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",headingTag:"h2"}}),O=new pe({props:{name:"class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"},{name:"nsfw_content_detected",val:": typing.Optional[typing.List[bool]]"}],parametersDescription:[{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or NumPy array of shape <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) — | |
| List indicating whether the corresponding generated image contains “not-safe-for-work” (NSFW) content, or | |
| <code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/vr_11477/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L10"}}),K=new ut({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/diffedit.md"}}),{c(){n=d("meta"),m=o(),i=d("p"),w=o(),g($.$$.fragment),ce=o(),B=d("p"),B.innerHTML=Ye,fe=o(),L=d("p"),L.textContent=Qe,me=o(),G=d("p"),G.innerHTML=qe,ge=o(),N=d("p"),N.innerHTML=Ae,ue=o(),R=d("p"),R.innerHTML=Oe,he=o(),g(V.$$.fragment),_e=o(),X=d("ul"),X.innerHTML=Ke,be=o(),g(z.$$.fragment),ye=o(),p=d("div"),g(F.$$.fragment),Ze=o(),g(E.$$.fragment),Ee=o(),ee=d("p"),ee.textContent=et,Se=o(),te=d("p"),te.innerHTML=tt,Ue=o(),ne=d("p"),ne.textContent=nt,Pe=o(),ie=d("ul"),ie.innerHTML=it,je=o(),D=d("div"),g(H.$$.fragment),Ce=o(),se=d("p"),se.textContent=st,We=o(),g(S.$$.fragment),$e=o(),T=d("div"),g(Y.$$.fragment),Be=o(),oe=d("p"),oe.textContent=ot,Le=o(),g(U.$$.fragment),Ge=o(),x=d("div"),g(Q.$$.fragment),Ne=o(),ae=d("p"),ae.textContent=at,Re=o(),g(P.$$.fragment),Ve=o(),j=d("div"),g(q.$$.fragment),Xe=o(),re=d("p"),re.textContent=rt,ve=o(),g(A.$$.fragment),we=o(),M=d("div"),g(O.$$.fragment),ze=o(),le=d("p"),le.textContent=lt,ke=o(),g(K.$$.fragment),De=o(),de=d("p"),this.h()},l(e){const t=mt("svelte-u9bgzb",document.head);n=c(t,"META",{name:!0,content:!0}),t.forEach(s),m=a(e),i=c(e,"P",{}),C(i).forEach(s),w=a(e),u($.$$.fragment,e),ce=a(e),B=c(e,"P",{"data-svelte-h":!0}),v(B)!=="svelte-1d7l3jz"&&(B.innerHTML=Ye),fe=a(e),L=c(e,"P",{"data-svelte-h":!0}),v(L)!=="svelte-1cwsb16"&&(L.textContent=Qe),me=a(e),G=c(e,"P",{"data-svelte-h":!0}),v(G)!=="svelte-5qswic"&&(G.innerHTML=qe),ge=a(e),N=c(e,"P",{"data-svelte-h":!0}),v(N)!=="svelte-9tfjar"&&(N.innerHTML=Ae),ue=a(e),R=c(e,"P",{"data-svelte-h":!0}),v(R)!=="svelte-1yyqs9v"&&(R.innerHTML=Oe),he=a(e),u(V.$$.fragment,e),_e=a(e),X=c(e,"UL",{"data-svelte-h":!0}),v(X)!=="svelte-s72hu"&&(X.innerHTML=Ke),be=a(e),u(z.$$.fragment,e),ye=a(e),p=c(e,"DIV",{class:!0});var f=C(p);u(F.$$.fragment,f),Ze=a(f),u(E.$$.fragment,f),Ee=a(f),ee=c(f,"P",{"data-svelte-h":!0}),v(ee)!=="svelte-1odvtwz"&&(ee.textContent=et),Se=a(f),te=c(f,"P",{"data-svelte-h":!0}),v(te)!=="svelte-25rx3g"&&(te.innerHTML=tt),Ue=a(f),ne=c(f,"P",{"data-svelte-h":!0}),v(ne)!=="svelte-145c10v"&&(ne.textContent=nt),Pe=a(f),ie=c(f,"UL",{"data-svelte-h":!0}),v(ie)!=="svelte-qf5usi"&&(ie.innerHTML=it),je=a(f),D=c(f,"DIV",{class:!0});var I=C(D);u(H.$$.fragment,I),Ce=a(I),se=c(I,"P",{"data-svelte-h":!0}),v(se)!=="svelte-e123jz"&&(se.textContent=st),We=a(I),u(S.$$.fragment,I),I.forEach(s),$e=a(f),T=c(f,"DIV",{class:!0});var J=C(T);u(Y.$$.fragment,J),Be=a(J),oe=c(J,"P",{"data-svelte-h":!0}),v(oe)!=="svelte-1grehch"&&(oe.textContent=ot),Le=a(J),u(U.$$.fragment,J),J.forEach(s),Ge=a(f),x=c(f,"DIV",{class:!0});var Z=C(x);u(Q.$$.fragment,Z),Ne=a(Z),ae=c(Z,"P",{"data-svelte-h":!0}),v(ae)!=="svelte-50j04k"&&(ae.textContent=at),Re=a(Z),u(P.$$.fragment,Z),Z.forEach(s),Ve=a(f),j=c(f,"DIV",{class:!0});var xe=C(j);u(q.$$.fragment,xe),Xe=a(xe),re=c(xe,"P",{"data-svelte-h":!0}),v(re)!=="svelte-16q0ax1"&&(re.textContent=rt),xe.forEach(s),f.forEach(s),ve=a(e),u(A.$$.fragment,e),we=a(e),M=c(e,"DIV",{class:!0});var 
Me=C(M);u(O.$$.fragment,Me),ze=a(Me),le=c(Me,"P",{"data-svelte-h":!0}),v(le)!=="svelte-1qpjiuf"&&(le.textContent=lt),Me.forEach(s),ke=a(e),u(K.$$.fragment,e),De=a(e),de=c(e,"P",{}),C(de).forEach(s),this.h()},h(){W(n,"name","hf:doc:metadata"),W(n,"content",wt),W(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),W(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),W(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),W(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),W(p,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),W(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){r(document.head,n),l(e,m,t),l(e,i,t),l(e,w,t),h($,e,t),l(e,ce,t),l(e,B,t),l(e,fe,t),l(e,L,t),l(e,me,t),l(e,G,t),l(e,ge,t),l(e,N,t),l(e,ue,t),l(e,R,t),l(e,he,t),h(V,e,t),l(e,_e,t),l(e,X,t),l(e,be,t),h(z,e,t),l(e,ye,t),l(e,p,t),h(F,p,null),r(p,Ze),h(E,p,null),r(p,Ee),r(p,ee),r(p,Se),r(p,te),r(p,Ue),r(p,ne),r(p,Pe),r(p,ie),r(p,je),r(p,D),h(H,D,null),r(D,Ce),r(D,se),r(D,We),h(S,D,null),r(p,$e),r(p,T),h(Y,T,null),r(T,Be),r(T,oe),r(T,Le),h(U,T,null),r(p,Ge),r(p,x),h(Q,x,null),r(x,Ne),r(x,ae),r(x,Re),h(P,x,null),r(p,Ve),r(p,j),h(q,j,null),r(j,Xe),r(j,re),l(e,ve,t),h(A,e,t),l(e,we,t),l(e,M,t),h(O,M,null),r(M,ze),r(M,le),l(e,ke,t),h(K,e,t),l(e,De,t),l(e,de,t),Te=!0},p(e,[t]){const f={};t&2&&(f.$$scope={dirty:t,ctx:e}),E.$set(f);const I={};t&2&&(I.$$scope={dirty:t,ctx:e}),S.$set(I);const J={};t&2&&(J.$$scope={dirty:t,ctx:e}),U.$set(J);const Z={};t&2&&(Z.$$scope={dirty:t,ctx:e}),P.$set(Z)},i(e){Te||(_($.$$.fragment,e),_(V.$$.fragment,e),_(z.$$.fragment,e),_(F.$$.fragment,e),_(E.$$.fragment,e),_(H.$$.fragment,e),_(S.$$.fragment,e),_(Y.$$.fragment,e),_(U.$$.fragment,e),_(Q.$$.fragment,e),_(P.$$.fragment,e),_(q.$$.fragment,e),_(A.$$.fragment,e),_(O.$$.fragment,e),_(K.$$.fragment,e),Te=!0)},o(e){b($.$$.fragment,e),b(V.$$.fragment,e),b(z.$$.fragment,e),b(F.$$.fragment,e),b(E.$$.fragment,e),b(H.$$.fragment,e),b(S.$$.fragment,e),b(Y.$$.fragment,e),b(U.$$.fragment,e),b(Q.$$.fragment,e),b(P.$$.fragment,e),b(q.$$.fragment,e),b(A.$$.fragment,e),b(O.$$.fragment,e),b(K.$$.fragment,e),Te=!1},d(e){e&&(s(m),s(i),s(w),s(ce),s(B),s(fe),s(L),s(me),s(G),s(ge),s(N),s(ue),s(R),s(he),s(_e),s(X),s(be),s(ye),s(p),s(ve),s(we),s(M),s(ke),s(De),s(de)),s(n),y($,e),y(V,e),y(z,e),y(F),y(E),y(H),y(S),y(Y),y(U),y(Q),y(P),y(q),y(A,e),y(O),y(K,e)}}}const wt='{"title":"DiffEdit","local":"diffedit","sections":[{"title":"Tips","local":"tips","sections":[],"depth":2},{"title":"StableDiffusionDiffEditPipeline","local":"diffusers.StableDiffusionDiffEditPipeline","sections":[],"depth":2},{"title":"StableDiffusionPipelineOutput","local":"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput","sections":[],"depth":2}],"depth":1}';function kt(k){return dt(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Et extends ct{constructor(n){super(),ft(this,n,kt,vt,pt,{})}}export{Et as component}; | |
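Below is a minimal sketch of how the `encode_prompt` parameters and the `StableDiffusionPipelineOutput` fields documented above fit together. The checkpoint name, prompts, and the tuple return value of `encode_prompt` are illustrative assumptions rather than guarantees made by this page; the example follows the doctest style used in the page's other code blocks.

>>> import torch
>>> from diffusers import StableDiffusionDiffEditPipeline

>>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
... ).to("cuda")

>>> # encode_prompt turns text into the embeddings the pipeline consumes;
>>> # recent diffusers releases return a (prompt_embeds, negative_prompt_embeds) tuple.
>>> prompt_embeds, negative_prompt_embeds = pipeline.encode_prompt(
...     "A bowl of pears",
...     device=pipeline.device,
...     num_images_per_prompt=1,
...     do_classifier_free_guidance=True,  # set True when guidance_scale > 1
...     negative_prompt="blurry, low quality",  # illustrative negative prompt
...     clip_skip=1,  # use the pre-final CLIP layer, as documented above
... )

>>> # A full pipeline call (with mask_image and image_latents, as in the usage
>>> # example earlier on this page) returns a StableDiffusionPipelineOutput:
>>> # out.images                 -> list of denoised PIL images of length batch_size
>>> # out.nsfw_content_detected  -> per-image NSFW flags, or None if the check was skipped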