# Inpainting

<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>

The Stable Diffusion model can also be applied to inpainting, which lets you edit specific parts of an image by providing a mask and a text prompt.

## Tips

It is recommended to use this pipeline with checkpoints that have been specifically fine-tuned for inpainting, such as [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting). Default text-to-image Stable Diffusion checkpoints, such as [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), are also compatible but may be less performant.

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!

</Tip>

## StableDiffusionInpaintPipeline

Pipeline for text-guided image inpainting using Stable Diffusion.

This model inherits from `DiffusionPipeline`. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).

The pipeline also inherits the following loading methods:

- `load_textual_inversion()` for loading textual inversion embeddings
- `load_lora_weights()` for loading LoRA weights
- `save_lora_weights()` for saving LoRA weights
- `load_ip_adapter()` for loading IP Adapters
- `from_single_file()` for loading `.ckpt` files

**Parameters:**

- **vae** (`AutoencoderKL` or `AsymmetricAutoencoderKL`) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`CLIPTextModel`) — Frozen text encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
- **tokenizer** (`CLIPTokenizer`) — A `CLIPTokenizer` to tokenize text.
- **unet** (`UNet2DConditionModel`) — A `UNet2DConditionModel` to denoise the encoded image latents.
- **scheduler** (`SchedulerMixin`) — A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of `DDIMScheduler`, `LMSDiscreteScheduler`, or `PNDMScheduler`.
- **safety_checker** (`StableDiffusionSafetyChecker`) — Classification module that estimates whether generated images could be considered offensive or harmful. Refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms.
- **feature_extractor** (`CLIPImageProcessor`) — A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
### __call__

The call function to the pipeline for generation.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — `Image`, numpy array, or tensor representing an image batch to be inpainted (which parts of the image to mask out with `mask_image` and repaint according to `prompt`). For both numpy arrays and PyTorch tensors, the expected value range is `[0, 1]`. If it is a tensor or a list of tensors, the expected shape is `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape is `(B, H, W, C)` or `(H, W, C)`. It can also accept image latents as `image`, but latents passed directly are not encoded again.
- **mask_image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — `Image`, numpy array, or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it is a numpy array or PyTorch tensor, it should contain one color channel (L) instead of 3, so the expected shape for a PyTorch tensor is `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)`, or `(H, W)`, and for a numpy array `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`.
- **height** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`) — The height in pixels of the generated image.
- **width** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`) — The width in pixels of the generated image.
- **padding_mask_crop** (`int`, *optional*, defaults to `None`) — The size of the margin in the crop applied to the image and mask. If `None`, no crop is applied to `image` and `mask_image`. If `padding_mask_crop` is not `None`, it first finds a rectangular region with the same aspect ratio as the image that contains all masked areas, then expands that region by `padding_mask_crop`. The image and `mask_image` are cropped to the expanded region before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large and contains information irrelevant for inpainting, such as background.
- **strength** (`float`, *optional*, defaults to 1.0) — Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point, and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, the added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`.
- **timesteps** (`List[int]`, *optional*) — Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed is used. Must be in descending order.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed is used.
- **guidance_scale** (`float`, *optional*, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **eta** (`float`, *optional*, defaults to 0.0) — Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the `DDIMScheduler` and is ignored in other schedulers.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **ip_adapter_image** (`PipelineImageInput`, *optional*) — Optional image input to work with IP Adapters.
- **ip_adapter_image_embeds** (`List[torch.Tensor]`, *optional*) — Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `StableDiffusionPipelineOutput` instead of a plain tuple.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings.
- **callback_on_step_end** (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*) — A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during inference with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.

**Returns:** `StableDiffusionPipelineOutput` or `tuple` — If `return_dict` is `True`, `StableDiffusionPipelineOutput` is returned; otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.

**Examples:**

```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO

>>> from diffusers import StableDiffusionInpaintPipeline


>>> def download_image(url):
...     response = requests.get(url)
...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

>>> init_image = download_image(img_url).resize((512, 512))
>>> mask_image = download_image(mask_url).resize((512, 512))

>>> pipe = StableDiffusionInpaintPipeline.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-inpainting", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")

>>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
>>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```
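As a hedged variation on the example above, the sketch below combines `strength` (values below 1.0 preserve more of the original image), `padding_mask_crop` (restrict diffusion to a crop around the mask), and a step-end callback. It assumes `pipe`, `init_image`, and `mask_image` from the example above, and the numeric values are illustrative, not tuned recommendations:

```py
# Sketch only: reuses `pipe`, `init_image`, and `mask_image` from the example above.
def log_step(pipeline, step, timestep, callback_kwargs):
    # Called at the end of each denoising step; must return the (possibly edited) kwargs.
    print(f"step {step}, timestep {timestep}")
    return callback_kwargs


image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    strength=0.8,  # keep some of the original content
    padding_mask_crop=32,  # inpaint a crop around the mask, then paste it back
    callback_on_step_end=log_step,
).images[0]
```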
### enable_attention_slicing

Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor into slices to compute attention in several steps. For more than one attention head, the computation is performed sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.

**Parameters:**

- **slice_size** (`str` or `int`, *optional*, defaults to `"auto"`) — When `"auto"`, halves the input to the attention heads, so attention is computed in two steps. If `"max"`, the maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`.

<Tip warning={true}>

⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. These attention computations are already very memory efficient, so you won't need to enable this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slowdowns!

</Tip>

**Examples:**

```py
>>> import torch
>>> from diffusers import StableDiffusionPipeline

>>> pipe = StableDiffusionPipeline.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-v1-5",
...     torch_dtype=torch.float16,
...     use_safetensors=True,
... )

>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> pipe.enable_attention_slicing()
>>> image = pipe(prompt).images[0]
```

### disable_attention_slicing

Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is computed in one step.
### enable_xformers_memory_efficient_attention

Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed up during training is not guaranteed.

<Tip warning={true}>

⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes precedence.

</Tip>

**Parameters:**

- **attention_op** (`Callable`, *optional*) — Override the default `None` operator for use as the `op` argument to the [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) function of xFormers.

**Examples:**

```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

>>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
>>> # Workaround for not accepting attention shape using VAE for Flash Attention
>>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
```

### disable_xformers_memory_efficient_attention

Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
### load_textual_inversion

Load Textual Inversion embeddings into the text encoder of `StableDiffusionPipeline` (both 🤗 Diffusers and Automatic1111 formats are supported).

**Parameters:**

- **pretrained_model_name_or_path** (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`) — Can be either one of the following or a list of them:
  - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a pretrained model hosted on the Hub.
  - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual inversion weights.
  - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
  - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
- **token** (`str` or `List[str]`, *optional*) — Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a list, then `token` must also be a list of equal length.
- **text_encoder** ([CLIPTextModel](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel), *optional*) — Frozen text encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). If not specified, the function uses `self.text_encoder`.
- **tokenizer** ([CLIPTokenizer](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer), *optional*) — A `CLIPTokenizer` to tokenize text. If not specified, the function uses `self.tokenizer`.
- **weight_name** (`str`, *optional*) — Name of a custom weight file. This should be used when:
  - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight name such as `text_inv.bin`.
  - The saved textual inversion file is in the Automatic1111 format.
- **cache_dir** (`Union[str, os.PathLike]`, *optional*) — Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used.
- **force_download** (`bool`, *optional*, defaults to `False`) — Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist.
- **proxies** (`Dict[str, str]`, *optional*) — A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
- **local_files_only** (`bool`, *optional*, defaults to `False`) — Whether to only load local model weights and configuration files. If set to `True`, the model won't be downloaded from the Hub.
- **hf_token** (`str` or `bool`, *optional*) — The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used.
- **revision** (`str`, *optional*, defaults to `"main"`) — The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git.
- **subfolder** (`str`, *optional*, defaults to `""`) — The subfolder location of a model file within a larger model repository on the Hub or locally.
- **mirror** (`str`, *optional*) — Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information.

To load a Textual Inversion embedding vector in 🤗 Diffusers format:

```py
from diffusers import StableDiffusionPipeline
import torch

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

pipe.load_textual_inversion("sd-concepts-library/cat-toy")

prompt = "A <cat-toy> backpack"

image = pipe(prompt, num_inference_steps=50).images[0]
image.save("cat-backpack.png")
```

To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector locally:

```py
from diffusers import StableDiffusionPipeline
import torch

model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")

prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."

image = pipe(prompt, num_inference_steps=50).images[0]
image.save("character.png")
```
### load_lora_weights

Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`.

All kwargs are forwarded to `self.lora_state_dict`. See `lora_state_dict()` for more details on how the state dict is loaded, `load_lora_into_unet()` for how it is loaded into `self.unet`, and `load_lora_into_text_encoder()` for how it is loaded into `self.text_encoder`.

**Parameters:**

- **pretrained_model_name_or_path_or_dict** (`str` or `os.PathLike` or `dict`) — See `lora_state_dict()`.
- **adapter_name** (`str`, *optional*) — Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where `i` is the total number of adapters being loaded.
- **low_cpu_mem_usage** (`bool`, *optional*) — Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
- **hotswap** (`bool`, *optional*, defaults to `False`) — Whether to substitute an existing (LoRA) adapter with the newly loaded adapter in-place. Instead of loading an additional adapter, this takes the existing adapter weights and replaces them with the weights of the new adapter, which can be faster and more memory efficient. The main advantage of hotswapping, however, is that when the model is compiled with `torch.compile`, loading the new adapter does not require recompilation of the model. When using hotswapping, the passed `adapter_name` should be the name of an already loaded adapter. If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling), you need to call an additional method before loading the adapter.
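A minimal sketch of loading LoRA weights into the pipeline; the repository path, weight file name, and adapter name below are placeholders, not references from this page:

```py
# Hypothetical repository and file name: substitute a LoRA trained for your base model.
pipe.load_lora_weights(
    "path/to/lora-repo",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="style",
)

# Later, the adapter can be replaced in place (useful with torch.compile, since no
# recompilation is triggered); `adapter_name` must name an already loaded adapter:
# pipe.load_lora_weights("path/to/other-lora", adapter_name="style", hotswap=True)
```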
### save_lora_weights

Save the LoRA parameters corresponding to the UNet and text encoder.

**Parameters:**

- **save_directory** (`str` or `os.PathLike`) — Directory to save LoRA parameters to. Will be created if it doesn't exist.
- **unet_lora_layers** (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`) — State dict of the LoRA layers corresponding to the `unet`.
- **text_encoder_lora_layers** (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`) — State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text encoder LoRA state dict because it comes from 🤗 Transformers.
- **is_main_process** (`bool`, *optional*, defaults to `True`) — Whether the process calling this is the main process or not. Useful during distributed training when you need to call this function on all processes; in that case, set `is_main_process=True` only on the main process to avoid race conditions.
- **save_function** (`Callable`) — The function to use to save the state dictionary. Useful during distributed training when you need to replace `torch.save` with another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`.
- **safe_serialization** (`bool`, *optional*, defaults to `True`) — Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
- **unet_lora_adapter_metadata** — LoRA adapter metadata associated with the unet to be serialized with the state dict.
- **text_encoder_lora_adapter_metadata** — LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
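A sketch of saving LoRA layers after training. The two state dicts are assumed to come from your training loop (for example, extracted with PEFT utilities); they are not produced by this page's examples:

```py
from diffusers import StableDiffusionInpaintPipeline

# Assumed: `unet_lora_state_dict` and `text_lora_state_dict` were produced during training.
StableDiffusionInpaintPipeline.save_lora_weights(
    save_directory="./my-inpaint-lora",
    unet_lora_layers=unet_lora_state_dict,
    text_encoder_lora_layers=text_lora_state_dict,  # optional; comes from the 🤗 Transformers encoder
    safe_serialization=True,
)
```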
**encode_prompt**

`encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None)`

- **prompt** (`str` or `List[str]`, *optional*) —
  The prompt to be encoded.
- **device** (`torch.device`) —
  The torch device.
- **num_images_per_prompt** (`int`) —
  The number of images that should be generated per prompt.
- **do_classifier_free_guidance** (`bool`) —
  Whether to use classifier-free guidance.
- **negative_prompt** (`str` or `List[str]`, *optional*) —
  The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **prompt_embeds** (`torch.Tensor`, *optional*) —
  Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) —
  Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **lora_scale** (`float`, *optional*) —
  A LoRA scale applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- **clip_skip** (`int`, *optional*) —
  Number of layers to skip from CLIP while computing the prompt embeddings. A value of 1 means the output of the pre-final layer is used for computing the prompt embeddings.

source: https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L312
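A minimal sketch of pre-computing embeddings with `encode_prompt` and reusing them. The prompts are just for illustration, and the two-tensor return value reflects current diffusers behavior as I understand it:

```python
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

# Encode once, then reuse across many inpainting calls.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    "Face of a yellow cat, high resolution, sitting on a park bench",
    device=torch.device("cuda"),
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,  # matches guidance_scale > 1
    negative_prompt="blurry, low quality",
)

# Later calls can then skip re-encoding by passing the embeddings directly:
# pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
#      image=init_image, mask_image=mask_image)
```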
**get_guidance_scale_embedding**

`get_guidance_scale_embedding(w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32)`

- **w** (`torch.Tensor`) —
  Guidance scale values used to generate embedding vectors that subsequently enrich the timestep embeddings.
- **embedding_dim** (`int`, *optional*, defaults to 512) —
  Dimension of the embeddings to generate.
- **dtype** (`torch.dtype`, *optional*, defaults to `torch.float32`) —
  Data type of the generated embeddings.

Returns: `torch.Tensor` — Embedding vectors with shape `(len(w), embedding_dim)`.

source: https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L823
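A small sketch of the shape contract, reusing the `pipe` from the previous example; the specific guidance values are arbitrary:

```python
import torch

# One guidance-scale value per sample in the batch.
w = torch.tensor([7.5, 9.0])

emb = pipe.get_guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float16)
print(emb.shape)  # torch.Size([2, 512]), i.e. (len(w), embedding_dim)
```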
## StableDiffusionPipelineOutput

`class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput(images: Union[List[PIL.Image.Image], np.ndarray], nsfw_content_detected: Optional[List[bool]])`

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) —
  List of denoised PIL images of length `batch_size`, or a NumPy array of shape `(batch_size, height, width, num_channels)`.
- **nsfw_content_detected** (`List[bool]`) —
  List indicating whether the corresponding generated image contains "not-safe-for-work" (NSFW) content, or `None` if safety checking could not be performed.

source: https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L11
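A short sketch of consuming the output object; `init_image` and `mask_image` are assumed to be the PIL images prepared as in the inpainting example earlier on this page:

```python
output = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
)

# `images` is a list of PIL images (or an ndarray when output_type="np").
image = output.images[0]

# Drop anything the safety checker flagged, when safety checking ran.
if output.nsfw_content_detected is not None:
    flagged_pairs = zip(output.images, output.nsfw_content_detected)
    safe_images = [img for img, flagged in flagged_pairs if not flagged]
```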