Buckets:
| import{s as On,o as Kn,n as X}from"../chunks/scheduler.8c3d61f6.js";import{S as ea,i as ta,g as d,s,r as u,A as na,h as p,f as r,c as i,j as $,u as g,x as w,k as J,y as a,a as f,v as h,d as _,t as b,w as y}from"../chunks/index.da70eac4.js";import{T as St}from"../chunks/Tip.1d9b8c37.js";import{D as P}from"../chunks/Docstring.6b390b9a.js";import{C as pt}from"../chunks/CodeBlock.00a903b3.js";import{E as lt}from"../chunks/ExampleCodeBlock.db12be95.js";import{H as dt,E as aa}from"../chunks/EditOnGithub.1e64e623.js";function sa(T){let n,I='Make sure to check out the Stable Diffusion <a href="overview#tips">Tips</a> section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!',l,o,m='If you’re interested in using one of the official checkpoints for a task, explore the <a href="https://huggingface.co/CompVis" rel="nofollow">CompVis</a>, <a href="https://huggingface.co/runwayml" rel="nofollow">Runway</a>, and <a href="https://huggingface.co/stabilityai" rel="nofollow">Stability AI</a> Hub organizations!';return{c(){n=d("p"),n.innerHTML=I,l=s(),o=d("p"),o.innerHTML=m},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-1j961ct"&&(n.innerHTML=I),l=i(t),o=p(t,"P",{"data-svelte-h":!0}),w(o)!=="svelte-z4pn9c"&&(o.innerHTML=m)},m(t,v){f(t,n,v),f(t,l,v),f(t,o,v)},p:X,d(t){t&&(r(n),r(l),r(o))}}}function ia(T){let n,I="Examples:",l,o,m;return o=new pt({props:{code:"aW1wb3J0JTIwUElMJTBBaW1wb3J0JTIwcmVxdWVzdHMlMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBpbyUyMGltcG9ydCUyMEJ5dGVzSU8lMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uSW5wYWludFBpcGVsaW5lJTBBJTBBJTBBZGVmJTIwZG93bmxvYWRfaW1hZ2UodXJsKSUzQSUwQSUyMCUyMCUyMCUyMHJlc3BvbnNlJTIwJTNEJTIwcmVxdWVzdHMuZ2V0KHVybCklMEElMjAlMjAlMjAlMjByZXR1cm4lMjBQSUwuSW1hZ2Uub3BlbihCeXRlc0lPKHJlc3BvbnNlLmNvbnRlbnQpKS5jb252ZXJ0KCUyMlJHQiUyMiklMEElMEElMEFpbWdfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW8ucG5nJTIyJTBBbWFza191cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGbGF0ZW50LWRpZmZ1c2lvbiUyRm1haW4lMkZkYXRhJTJGaW5wYWludGluZ19leGFtcGxlcyUyRm92ZXJ0dXJlLWNyZWF0aW9ucy01c0k2ZlFnWUl1b19tYXNrLnBuZyUyMiUwQSUwQWluaXRfaW1hZ2UlMjAlM0QlMjBkb3dubG9hZF9pbWFnZShpbWdfdXJsKS5yZXNpemUoKDUxMiUyQyUyMDUxMikpJTBBbWFza19pbWFnZSUyMCUzRCUyMGRvd25sb2FkX2ltYWdlKG1hc2tfdXJsKS5yZXNpemUoKDUxMiUyQyUyMDUxMikpJTBBJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvbklucGFpbnRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIycnVud2F5bWwlMkZzdGFibGUtZGlmZnVzaW9uLWlucGFpbnRpbmclMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBcGlwZSUyMCUzRCUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJGYWNlJTIwb2YlMjBhJTIweWVsbG93JTIwY2F0JTJDJTIwaGlnaCUyMHJlc29sdXRpb24lMkMlMjBzaXR0aW5nJTIwb24lMjBhJTIwcGFyayUyMGJlbmNoJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> PIL | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">download_image</span>(<span class="hljs-params">url</span>): | |
| <span class="hljs-meta">... </span> response = requests.get(url) | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> PIL.Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">>>> </span>mask_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>init_image = download_image(img_url).resize((<span class="hljs-number">512</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>mask_image = download_image(mask_url).resize((<span class="hljs-number">512</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>pipe = StableDiffusionInpaintPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"runwayml/stable-diffusion-inpainting"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Face of a yellow cat, high resolution, sitting on a park bench"</span> | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-kvfsh7"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function oa(T){let n,I=`⚠️ Don’t enable attention slicing if you’re already using <code>scaled_dot_product_attention</code> (SDPA) from PyTorch | |
| 2.0 or xFormers. These attention computations are already very memory efficient so you won’t need to enable | |
| this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs!`;return{c(){n=d("p"),n.innerHTML=I},l(l){n=p(l,"P",{"data-svelte-h":!0}),w(n)!=="svelte-ackzsn"&&(n.innerHTML=I)},m(l,o){f(l,n,o)},p:X,d(l){l&&r(n)}}}function ra(T){let n,I="Examples:",l,o,m;return o=new pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBJTIwJTIwJTIwJTIwdXNlX3NhZmV0ZW5zb3JzJTNEVHJ1ZSUyQyUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMGFuJTIwYXN0cm9uYXV0JTIwcmlkaW5nJTIwYSUyMGhvcnNlJTIwb24lMjBtYXJzJTIyJTBBcGlwZS5lbmFibGVfYXR0ZW50aW9uX3NsaWNpbmcoKSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-meta">>>> </span>pipe = StableDiffusionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span> use_safetensors=<span class="hljs-literal">True</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"a photo of an astronaut riding a horse on mars"</span> | |
| <span class="hljs-meta">>>> </span>pipe.enable_attention_slicing() | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-kvfsh7"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function la(T){let n,I=`⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes | |
| precedent.`;return{c(){n=d("p"),n.textContent=I},l(l){n=p(l,"P",{"data-svelte-h":!0}),w(n)!=="svelte-17p1lpg"&&(n.textContent=I)},m(l,o){f(l,n,o)},p:X,d(l){l&&r(n)}}}function da(T){let n,I="Examples:",l,o,m;return o=new pt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIweGZvcm1lcnMub3BzJTIwaW1wb3J0JTIwTWVtb3J5RWZmaWNpZW50QXR0ZW50aW9uRmxhc2hBdHRlbnRpb25PcCUwQSUwQXBpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLTItMSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlJTIwJTNEJTIwcGlwZS50byglMjJjdWRhJTIyKSUwQXBpcGUuZW5hYmxlX3hmb3JtZXJzX21lbW9yeV9lZmZpY2llbnRfYXR0ZW50aW9uKGF0dGVudGlvbl9vcCUzRE1lbW9yeUVmZmljaWVudEF0dGVudGlvbkZsYXNoQXR0ZW50aW9uT3ApJTBBJTIzJTIwV29ya2Fyb3VuZCUyMGZvciUyMG5vdCUyMGFjY2VwdGluZyUyMGF0dGVudGlvbiUyMHNoYXBlJTIwdXNpbmclMjBWQUUlMjBmb3IlMjBGbGFzaCUyMEF0dGVudGlvbiUwQXBpcGUudmFlLmVuYWJsZV94Zm9ybWVyc19tZW1vcnlfZWZmaWNpZW50X2F0dGVudGlvbihhdHRlbnRpb25fb3AlM0ROb25lKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> xformers.ops <span class="hljs-keyword">import</span> MemoryEfficientAttentionFlashAttentionOp | |
| <span class="hljs-meta">>>> </span>pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"stabilityai/stable-diffusion-2-1"</span>, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Workaround for not accepting attention shape using VAE for Flash Attention</span> | |
| <span class="hljs-meta">>>> </span>pipe.vae.enable_xformers_memory_efficient_attention(attention_op=<span class="hljs-literal">None</span>)`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-kvfsh7"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function pa(T){let n,I="To load a Textual Inversion embedding vector in 🤗 Diffusers format:",l,o,m;return o=new pt({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZChtb2RlbF9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNikudG8oJTIyY3VkYSUyMiklMEElMEFwaXBlLmxvYWRfdGV4dHVhbF9pbnZlcnNpb24oJTIyc2QtY29uY2VwdHMtbGlicmFyeSUyRmNhdC10b3klMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMCUzQ2NhdC10b3klM0UlMjBiYWNrcGFjayUyMiUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTApLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmNhdC1iYWNrcGFjay5wbmclMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| model_id = <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span> | |
| pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_textual_inversion(<span class="hljs-string">"sd-concepts-library/cat-toy"</span>) | |
| prompt = <span class="hljs-string">"A <cat-toy> backpack"</span> | |
| image = pipe(prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"cat-backpack.png"</span>)`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-1gc783q"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function ca(T){let n,I="locally:",l,o,m;return o=new pt({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZChtb2RlbF9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNikudG8oJTIyY3VkYSUyMiklMEElMEFwaXBlLmxvYWRfdGV4dHVhbF9pbnZlcnNpb24oJTIyLiUyRmNoYXJ0dXJuZXJ2Mi5wdCUyMiUyQyUyMHRva2VuJTNEJTIyY2hhcnR1cm5lcnYyJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmNoYXJ0dXJuZXJ2MiUyQyUyMG11bHRpcGxlJTIwdmlld3MlMjBvZiUyMHRoZSUyMHNhbWUlMjBjaGFyYWN0ZXIlMjBpbiUyMHRoZSUyMHNhbWUlMjBvdXRmaXQlMkMlMjBhJTIwY2hhcmFjdGVyJTIwdHVybmFyb3VuZCUyMG9mJTIwYSUyMHdvbWFuJTIwd2VhcmluZyUyMGElMjBibGFjayUyMGphY2tldCUyMGFuZCUyMHJlZCUyMHNoaXJ0JTJDJTIwYmVzdCUyMHF1YWxpdHklMkMlMjBpbnRyaWNhdGUlMjBkZXRhaWxzLiUyMiUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTApLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmNoYXJhY3Rlci5wbmclMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| model_id = <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span> | |
| pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_textual_inversion(<span class="hljs-string">"./charturnerv2.pt"</span>, token=<span class="hljs-string">"charturnerv2"</span>) | |
| prompt = <span class="hljs-string">"charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."</span> | |
| image = pipe(prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"character.png"</span>)`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-4c75kq"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function fa(T){let n,I="🧪 This is an experimental feature!";return{c(){n=d("p"),n.textContent=I},l(l){n=p(l,"P",{"data-svelte-h":!0}),w(n)!=="svelte-15q3ih4"&&(n.textContent=I)},m(l,o){f(l,n,o)},p:X,d(l){l&&r(n)}}}function ma(T){let n,I="Examples:",l,o,m;return o=new pt({props:{code:"aW1wb3J0JTIwamF4JTBBaW1wb3J0JTIwbnVtcHklMjBhcyUyMG5wJTBBZnJvbSUyMGZsYXguamF4X3V0aWxzJTIwaW1wb3J0JTIwcmVwbGljYXRlJTBBZnJvbSUyMGZsYXgudHJhaW5pbmcuY29tbW9uX3V0aWxzJTIwaW1wb3J0JTIwc2hhcmQlMEFpbXBvcnQlMjBQSUwlMEFpbXBvcnQlMjByZXF1ZXN0cyUwQWZyb20lMjBpbyUyMGltcG9ydCUyMEJ5dGVzSU8lMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmxheFN0YWJsZURpZmZ1c2lvbklucGFpbnRQaXBlbGluZSUwQSUwQSUwQWRlZiUyMGRvd25sb2FkX2ltYWdlKHVybCklM0ElMEElMjAlMjAlMjAlMjByZXNwb25zZSUyMCUzRCUyMHJlcXVlc3RzLmdldCh1cmwpJTBBJTIwJTIwJTIwJTIwcmV0dXJuJTIwUElMLkltYWdlLm9wZW4oQnl0ZXNJTyhyZXNwb25zZS5jb250ZW50KSkuY29udmVydCglMjJSR0IlMjIpJTBBJTBBJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZsYXRlbnQtZGlmZnVzaW9uJTJGbWFpbiUyRmRhdGElMkZpbnBhaW50aW5nX2V4YW1wbGVzJTJGb3ZlcnR1cmUtY3JlYXRpb25zLTVzSTZmUWdZSXVvLnBuZyUyMiUwQW1hc2tfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW9fbWFzay5wbmclMjIlMEElMEFpbml0X2ltYWdlJTIwJTNEJTIwZG93bmxvYWRfaW1hZ2UoaW1nX3VybCkucmVzaXplKCg1MTIlMkMlMjA1MTIpKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBkb3dubG9hZF9pbWFnZShtYXNrX3VybCkucmVzaXplKCg1MTIlMkMlMjA1MTIpKSUwQSUwQXBpcGVsaW5lJTJDJTIwcGFyYW1zJTIwJTNEJTIwRmxheFN0YWJsZURpZmZ1c2lvbklucGFpbnRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyeHZqaWFydWklMkZzdGFibGUtZGlmZnVzaW9uLTItaW5wYWludGluZyUyMiUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJGYWNlJTIwb2YlMjBhJTIweWVsbG93JTIwY2F0JTJDJTIwaGlnaCUyMHJlc29sdXRpb24lMkMlMjBzaXR0aW5nJTIwb24lMjBhJTIwcGFyayUyMGJlbmNoJTIyJTBBcHJuZ19zZWVkJTIwJTNEJTIwamF4LnJhbmRvbS5QUk5HS2V5KDApJTBBbnVtX2luZmVyZW5jZV9zdGVwcyUyMCUzRCUyMDUwJTBBJTBBbnVtX3NhbXBsZXMlMjAlM0QlMjBqYXguZGV2aWNlX2NvdW50KCklMEFwcm9tcHQlMjAlM0QlMjBudW1fc2FtcGxlcyUyMColMjAlNUJwcm9tcHQlNUQlMEFpbml0X2ltYWdlJTIwJTNEJTIwbnVtX3NhbXBsZXMlMjAqJTIwJTVCaW5pdF9pbWFnZSU1RCUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBudW1fc2FtcGxlcyUyMColMjAlNUJtYXNrX2ltYWdlJTVEJTBBcHJvbXB0X2lkcyUyQyUyMHByb2Nlc3NlZF9tYXNrZWRfaW1hZ2VzJTJDJTIwcHJvY2Vzc2VkX21hc2tzJTIwJTNEJTIwcGlwZWxpbmUucHJlcGFyZV9pbnB1dHMoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTJDJTIwaW5pdF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlMEEpJTBBJTBBcGFyYW1zJTIwJTNEJTIwcmVwbGljYXRlKHBhcmFtcyklMEFwcm5nX3NlZWQlMjAlM0QlMjBqYXgucmFuZG9tLnNwbGl0KHBybmdfc2VlZCUyQyUyMGpheC5kZXZpY2VfY291bnQoKSklMEFwcm9tcHRfaWRzJTIwJTNEJTIwc2hhcmQocHJvbXB0X2lkcyklMEFwcm9jZXNzZWRfbWFza2VkX2ltYWdlcyUyMCUzRCUyMHNoYXJkKHByb2Nlc3NlZF9tYXNrZWRfaW1hZ2VzKSUwQXByb2Nlc3NlZF9tYXNrcyUyMCUzRCUyMHNoYXJkKHByb2Nlc3NlZF9tYXNrcyklMEElMEFpbWFnZXMlMjAlM0QlMjBwaXBlbGluZSglMEElMjAlMjAlMjAlMjBwcm9tcHRfaWRzJTJDJTIwcHJvY2Vzc2VkX21hc2tzJTJDJTIwcHJvY2Vzc2VkX21hc2tlZF9pbWFnZXMlMkMlMjBwYXJhbXMlMkMlMjBwcm5nX3NlZWQlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTJDJTIwaml0JTNEVHJ1ZSUwQSkuaW1hZ2VzJTBBaW1hZ2VzJTIwJTNEJTIwcGlwZWxpbmUubnVtcHlfdG9fcGlsKG5wLmFzYXJyYXkoaW1hZ2VzLnJlc2hhcGUoKG51bV9zYW1wbGVzJTJDKSUyMCUyQiUyMGltYWdlcy5zaGFwZSU1Qi0zJTNBJTVEKSkp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> jax | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> flax.jax_utils <span class="hljs-keyword">import</span> replicate | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> flax.training.common_utils <span class="hljs-keyword">import</span> shard | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> PIL | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> requests | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> io <span class="hljs-keyword">import</span> BytesIO | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FlaxStableDiffusionInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">def</span> <span class="hljs-title function_">download_image</span>(<span class="hljs-params">url</span>): | |
| <span class="hljs-meta">... </span> response = requests.get(url) | |
| <span class="hljs-meta">... </span> <span class="hljs-keyword">return</span> PIL.Image.<span class="hljs-built_in">open</span>(BytesIO(response.content)).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">>>> </span>mask_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>init_image = download_image(img_url).resize((<span class="hljs-number">512</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>mask_image = download_image(mask_url).resize((<span class="hljs-number">512</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"xvjiarui/stable-diffusion-2-inpainting"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Face of a yellow cat, high resolution, sitting on a park bench"</span> | |
| <span class="hljs-meta">>>> </span>prng_seed = jax.random.PRNGKey(<span class="hljs-number">0</span>) | |
| <span class="hljs-meta">>>> </span>num_inference_steps = <span class="hljs-number">50</span> | |
| <span class="hljs-meta">>>> </span>num_samples = jax.device_count() | |
| <span class="hljs-meta">>>> </span>prompt = num_samples * [prompt] | |
| <span class="hljs-meta">>>> </span>init_image = num_samples * [init_image] | |
| <span class="hljs-meta">>>> </span>mask_image = num_samples * [mask_image] | |
| <span class="hljs-meta">>>> </span>prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs( | |
| <span class="hljs-meta">... </span> prompt, init_image, mask_image | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-comment"># shard inputs and rng</span> | |
| <span class="hljs-meta">>>> </span>params = replicate(params) | |
| <span class="hljs-meta">>>> </span>prng_seed = jax.random.split(prng_seed, jax.device_count()) | |
| <span class="hljs-meta">>>> </span>prompt_ids = shard(prompt_ids) | |
| <span class="hljs-meta">>>> </span>processed_masked_images = shard(processed_masked_images) | |
| <span class="hljs-meta">>>> </span>processed_masks = shard(processed_masks) | |
| <span class="hljs-meta">>>> </span>images = pipeline( | |
| <span class="hljs-meta">... </span> prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=<span class="hljs-literal">True</span> | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-<span class="hljs-number">3</span>:])))`,wrap:!1}}),{c(){n=d("p"),n.textContent=I,l=s(),u(o.$$.fragment)},l(t){n=p(t,"P",{"data-svelte-h":!0}),w(n)!=="svelte-kvfsh7"&&(n.textContent=I),l=i(t),g(o.$$.fragment,t)},m(t,v){f(t,n,v),f(t,l,v),h(o,t,v),m=!0},p:X,i(t){m||(_(o.$$.fragment,t),m=!0)},o(t){b(o.$$.fragment,t),m=!1},d(t){t&&(r(n),r(l)),y(o,t)}}}function ua(T){let n,I,l,o,m,t,v,Mn="The Stable Diffusion model can also be applied to inpainting which lets you edit specific parts of an image by providing a mask and a text prompt using Stable Diffusion.",ft,re,mt,le,Tn=`It is recommended to use this pipeline with checkpoints that have been specifically fine-tuned for inpainting, such | |
| as <a href="https://huggingface.co/runwayml/stable-diffusion-inpainting" rel="nofollow">runwayml/stable-diffusion-inpainting</a>. Default | |
| text-to-image Stable Diffusion checkpoints, such as | |
| <a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5" rel="nofollow">stable-diffusion-v1-5/stable-diffusion-v1-5</a> are also compatible but they might be less performant.`,ut,V,gt,de,ht,x,pe,Pt,Ze,$n="Pipeline for text-guided image inpainting using Stable Diffusion.",jt,Ce,Jn=`This model inherits from <a href="/docs/diffusers/pr_10101/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods | |
| implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Dt,We,kn="The pipeline also inherits the following loading methods:",Ut,Le,Sn='<li><a href="/docs/diffusers/pr_10101/en/api/loaders/textual_inversion#diffusers.loaders.TextualInversionLoaderMixin.load_textual_inversion">load_textual_inversion()</a> for loading textual inversion embeddings</li> <li><a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights">load_lora_weights()</a> for loading LoRA weights</li> <li><a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights">save_lora_weights()</a> for saving LoRA weights</li> <li><a href="/docs/diffusers/pr_10101/en/api/loaders/ip_adapter#diffusers.loaders.IPAdapterMixin.load_ip_adapter">load_ip_adapter()</a> for loading IP Adapters</li> <li><a href="/docs/diffusers/pr_10101/en/api/loaders/single_file#diffusers.loaders.FromSingleFileMixin.from_single_file">from_single_file()</a> for loading <code>.ckpt</code> files</li>',Zt,N,ce,Ct,Fe,Pn="The call function to the pipeline for generation.",Wt,E,Lt,Z,fe,Ft,Ne,jn=`Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor | |
| in slices to compute attention in several steps. For more than one attention head, the computation is performed | |
| sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.`,Nt,Y,Gt,H,Bt,A,me,Xt,Ge,Dn=`Disable sliced attention computation. If <code>enable_attention_slicing</code> was previously called, attention is | |
| computed in one step.`,zt,C,ue,Rt,Be,Un=`Enable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>. When this | |
| option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed | |
| up during training is not guaranteed.`,Vt,Q,Et,q,Yt,O,ge,Ht,Xe,Zn='Disable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>.',At,k,he,Qt,ze,Cn=`Load Textual Inversion embeddings into the text encoder of <a href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline">StableDiffusionPipeline</a> (both 🤗 Diffusers and | |
| Automatic1111 formats are supported).`,qt,Re,Wn="Example:",Ot,K,Kt,Ve,Ln=`To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first | |
| (for example from <a href="https://civitai.com/models/3036?modelVersionId=9857" rel="nofollow">civitAI</a>) and then load the vector`,en,ee,tn,S,_e,nn,Ee,Fn=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.unet</code> and | |
| <code>self.text_encoder</code>.`,an,Ye,Nn="All kwargs are forwarded to <code>self.lora_state_dict</code>.",sn,He,Gn=`See <a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is | |
| loaded.`,on,Ae,Bn=`See <a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet">load_lora_into_unet()</a> for more details on how the state dict is | |
| loaded into <code>self.unet</code>.`,rn,Qe,Xn=`See <a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder">load_lora_into_text_encoder()</a> for more details on how the state | |
| dict is loaded into <code>self.text_encoder</code>.`,ln,te,be,dn,qe,zn="Save the LoRA parameters corresponding to the UNet and text encoder.",pn,ne,ye,cn,Oe,Rn="Encodes the prompt into text encoder hidden states.",fn,ae,we,mn,Ke,Vn='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',_t,ve,bt,z,xe,un,et,En="Output class for Stable Diffusion pipelines.",yt,Ie,wt,j,Me,gn,tt,Yn="Flax-based pipeline for text-guided image inpainting using Stable Diffusion.",hn,se,_n,nt,Hn=`This model inherits from <a href="/docs/diffusers/pr_10101/en/api/pipelines/overview#diffusers.FlaxDiffusionPipeline">FlaxDiffusionPipeline</a>. Check the superclass documentation for the generic methods | |
| implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,bn,G,Te,yn,at,An="Function invoked when calling the pipeline for generation.",wn,ie,vt,$e,xt,W,Je,vn,st,Qn="Output class for Flax-based Stable Diffusion pipelines.",xn,oe,ke,In,it,qn="Returns a new object replacing the specified fields with new values.",It,Se,Mt,ct,Tt;return m=new dt({props:{title:"Inpainting",local:"inpainting",headingTag:"h1"}}),re=new dt({props:{title:"Tips",local:"tips",headingTag:"h2"}}),V=new St({props:{$$slots:{default:[sa]},$$scope:{ctx:T}}}),de=new dt({props:{title:"StableDiffusionInpaintPipeline",local:"diffusers.StableDiffusionInpaintPipeline",headingTag:"h2"}}),pe=new P({props:{name:"class diffusers.StableDiffusionInpaintPipeline",anchor:"diffusers.StableDiffusionInpaintPipeline",parameters:[{name:"vae",val:": typing.Union[diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL, diffusers.models.autoencoders.autoencoder_asym_kl.AsymmetricAutoencoderKL]"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": KarrasDiffusionSchedulers"},{name:"safety_checker",val:": StableDiffusionSafetyChecker"},{name:"feature_extractor",val:": CLIPImageProcessor"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"requires_safety_checker",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.vae",description:`<strong>vae</strong> ([<code>AutoencoderKL</code>, <code>AsymmetricAutoencoderKL</code>]) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| Frozen text-encoder (<a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a>).`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| A <code>CLIPTokenizer</code> to tokenize text.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionInpaintPipeline.unet",description:`<strong>unet</strong> (<a href="/docs/diffusers/pr_10101/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) — | |
| A <code>UNet2DConditionModel</code> to denoise the encoded image latents.`,name:"unet"},{anchor:"diffusers.StableDiffusionInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_10101/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents. Can be one of | |
| <a href="/docs/diffusers/pr_10101/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>, <a href="/docs/diffusers/pr_10101/en/api/schedulers/lms_discrete#diffusers.LMSDiscreteScheduler">LMSDiscreteScheduler</a>, or <a href="/docs/diffusers/pr_10101/en/api/schedulers/pndm#diffusers.PNDMScheduler">PNDMScheduler</a>.`,name:"scheduler"},{anchor:"diffusers.StableDiffusionInpaintPipeline.safety_checker",description:`<strong>safety_checker</strong> (<code>StableDiffusionSafetyChecker</code>) — | |
| Classification module that estimates whether generated images could be considered offensive or harmful. | |
| Please refer to the <a href="https://huggingface.co/runwayml/stable-diffusion-v1-5" rel="nofollow">model card</a> for more details | |
| about a model’s potential harms.`,name:"safety_checker"},{anchor:"diffusers.StableDiffusionInpaintPipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor" rel="nofollow">CLIPImageProcessor</a>) — | |
| A <code>CLIPImageProcessor</code> to extract features from generated images; used as inputs to the <code>safety_checker</code>.`,name:"feature_extractor"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L114"}}),ce=new P({props:{name:"__call__",anchor:"diffusers.StableDiffusionInpaintPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"masked_image_latents",val:": Tensor = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"padding_mask_crop",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 1.0"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"sigmas",val:": typing.List[float] = None"},{name:"guidance_scale",val:": float = 7.5"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"clip_skip",val:": int = None"},{name:"callback_on_step_end",val:": typing.Union[typing.Callable[[int, int, typing.Dict], NoneType], diffusers.callbacks.PipelineCallback, diffusers.callbacks.MultiPipelineCallbacks, NoneType] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide image generation. If not defined, you need to pass <code>prompt_embeds</code>.`,name:"prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be inpainted (which parts of the image to | |
| be masked out with <code>mask_image</code> and repainted according to <code>prompt</code>). For both numpy array and pytorch | |
| tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list or tensors, the | |
| expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a list of arrays, the | |
| expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image latents as <code>image</code>, but | |
| if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to mask <code>image</code>. White pixels in the mask | |
| are repainted while black pixels are preserved. If <code>mask_image</code> is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it’s a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be <code>(B, 1, H, W)</code>, <code>(B, H, W)</code>, <code>(1, H, W)</code>, <code>(H, W)</code>. And for numpy array would be for <code>(B, H, W, 1)</code>, <code>(B, H, W)</code>, <code>(H, W, 1)</code>, or <code>(H, W)</code>.`,name:"mask_image"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to <code>self.unet.config.sample_size * self.vae_scale_factor</code>) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to <code>self.unet.config.sample_size * self.vae_scale_factor</code>) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The size of margin in the crop to be applied to the image and masking. If <code>None</code>, no crop is applied to | |
| image and mask_image. If <code>padding_mask_crop</code> is not <code>None</code>, it will first find a rectangular region | |
| with the same aspect ration of the image and contains all masked area, and then expand that area based | |
| on <code>padding_mask_crop</code>. The image and mask_image will then be cropped based on the expanded area before | |
| resizing to the original image size for inpainting. This is useful when the masked area is small while | |
| the image is large and contain information irrelevant for inpainting, such as background.`,name:"padding_mask_crop"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference. This parameter is modulated by <code>strength</code>.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) — | |
| A higher guidance scale value encourages the model to generate images closely linked to the text | |
| <code>prompt</code> at the expense of lower image quality. Guidance scale is enabled when <code>guidance_scale > 1</code>.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide what to not include in image generation. If not defined, you need to | |
| pass <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (<code>guidance_scale < 1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) from the <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">DDIM</a> paper. Only applies | |
| to the <a href="/docs/diffusers/pr_10101/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>, and is ignored in other schedulers.`,name:"eta"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make | |
| generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor is generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not | |
| provided, text embeddings are generated from the <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If | |
| not provided, <code>negative_prompt_embeds</code> are generated from the <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> — (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. It should | |
| contain the negative image embedding if <code>do_classifier_free_guidance</code> is set to <code>True</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between <code>PIL.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <a href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput">StableDiffusionPipelineOutput</a> instead of a | |
| plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow"><code>self.processor</code></a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <code>PipelineCallback</code>, <code>MultiPipelineCallbacks</code>, <em>optional</em>) — | |
| A function or a subclass of <code>PipelineCallback</code> or <code>MultiPipelineCallbacks</code> that is called at the end of | |
| each denoising step during the inference. with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a | |
| list of all tensors as specified by <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L862",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput" | |
| >StableDiffusionPipelineOutput</a> is returned, | |
| otherwise a <code>tuple</code> is returned where the first element is a list with the generated images and the | |
| second element is a list of <code>bool</code>s indicating whether the corresponding generated image contains | |
| “not-safe-for-work” (nsfw) content.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput" | |
| >StableDiffusionPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),E=new lt({props:{anchor:"diffusers.StableDiffusionInpaintPipeline.__call__.example",$$slots:{default:[ia]},$$scope:{ctx:T}}}),fe=new P({props:{name:"enable_attention_slicing",anchor:"diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing",parameters:[{name:"slice_size",val:": typing.Union[int, str, NoneType] = 'auto'"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing.slice_size",description:`<strong>slice_size</strong> (<code>str</code> or <code>int</code>, <em>optional</em>, defaults to <code>"auto"</code>) — | |
| When <code>"auto"</code>, halves the input to the attention heads, so attention will be computed in two steps. If | |
| <code>"max"</code>, maximum amount of memory will be saved by running only one slice at a time. If a number is | |
| provided, uses as many slices as <code>attention_head_dim // slice_size</code>. In this case, <code>attention_head_dim</code> | |
| must be a multiple of <code>slice_size</code>.`,name:"slice_size"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/pipeline_utils.py#L1635"}}),Y=new St({props:{warning:!0,$$slots:{default:[oa]},$$scope:{ctx:T}}}),H=new lt({props:{anchor:"diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing.example",$$slots:{default:[ra]},$$scope:{ctx:T}}}),me=new P({props:{name:"disable_attention_slicing",anchor:"diffusers.StableDiffusionInpaintPipeline.disable_attention_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/pipeline_utils.py#L1675"}}),ue=new P({props:{name:"enable_xformers_memory_efficient_attention",anchor:"diffusers.StableDiffusionInpaintPipeline.enable_xformers_memory_efficient_attention",parameters:[{name:"attention_op",val:": typing.Optional[typing.Callable] = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.enable_xformers_memory_efficient_attention.attention_op",description:`<strong>attention_op</strong> (<code>Callable</code>, <em>optional</em>) — | |
| Override the default <code>None</code> operator for use as <code>op</code> argument to the | |
| <a href="https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention" rel="nofollow"><code>memory_efficient_attention()</code></a> | |
| function of xFormers.`,name:"attention_op"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/pipeline_utils.py#L1574"}}),Q=new St({props:{warning:!0,$$slots:{default:[la]},$$scope:{ctx:T}}}),q=new lt({props:{anchor:"diffusers.StableDiffusionInpaintPipeline.enable_xformers_memory_efficient_attention.example",$$slots:{default:[da]},$$scope:{ctx:T}}}),ge=new P({props:{name:"disable_xformers_memory_efficient_attention",anchor:"diffusers.StableDiffusionInpaintPipeline.disable_xformers_memory_efficient_attention",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/pipeline_utils.py#L1609"}}),he=new P({props:{name:"load_textual_inversion",anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, typing.List[str], typing.Dict[str, torch.Tensor], typing.List[typing.Dict[str, torch.Tensor]]]"},{name:"token",val:": typing.Union[typing.List[str], str, NoneType] = None"},{name:"tokenizer",val:": typing.Optional[ForwardRef('PreTrainedTokenizer')] = None"},{name:"text_encoder",val:": typing.Optional[ForwardRef('PreTrainedModel')] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code> or <code>List[str or os.PathLike]</code> or <code>Dict</code> or <code>List[Dict]</code>) — | |
| Can be either one of the following or a list of them:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>sd-concepts-library/low-poly-hd-logos-icons</code>) of a | |
| pretrained model hosted on the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_text_inversion_directory/</code>) containing the textual | |
| inversion weights.</li> | |
| <li>A path to a <em>file</em> (for example <code>./my_text_inversions.pt</code>) containing textual inversion weights.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.token",description:`<strong>token</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| Override the token to use for the textual inversion weights. If <code>pretrained_model_name_or_path</code> is a | |
| list, then <code>token</code> must also be a list of equal length.`,name:"token"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIPTextModel</a>, <em>optional</em>) — | |
| Frozen text-encoder (<a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a>). | |
| If not specified, function will take self.tokenizer.`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>, <em>optional</em>) — | |
| A <code>CLIPTokenizer</code> to tokenize text. If not specified, function will take self.tokenizer.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.weight_name",description:`<strong>weight_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Name of a custom weight file. This should be used when:</p> | |
| <ul> | |
| <li>The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight | |
| name such as <code>text_inv.bin</code>.</li> | |
| <li>The saved textual inversion file is in the Automatic1111 format.</li> | |
| </ul>`,name:"weight_name"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) — | |
| Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not | |
| guarantee the timeliness or safety of the source, and you should refer to the mirror site for more | |
| information.`,name:"mirror"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/loaders/textual_inversion.py#L263"}}),K=new lt({props:{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.example",$$slots:{default:[pa]},$$scope:{ctx:T}}}),ee=new lt({props:{anchor:"diffusers.StableDiffusionInpaintPipeline.load_textual_inversion.example-2",$$slots:{default:[ca]},$$scope:{ctx:T}}}),_e=new P({props:{name:"load_lora_weights",anchor:"diffusers.StableDiffusionInpaintPipeline.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.StableDiffusionInpaintPipeline.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10101/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/loaders/lora_pipeline.py#L75"}}),be=new P({props:{name:"save_lora_weights",anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"unet_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_lora_layers",val:": typing.Dict[str, torch.nn.modules.module.Module] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.unet_lora_layers",description:`<strong>unet_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>unet</code>.`,name:"unet_lora_layers"},{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.text_encoder_lora_layers",description:`<strong>text_encoder_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_lora_layers"},{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.StableDiffusionInpaintPipeline.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/loaders/lora_pipeline.py#L449"}}),ye=new P({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:""},{name:"device",val:""},{name:"num_images_per_prompt",val:""},{name:"do_classifier_free_guidance",val:""},{name:"negative_prompt",val:" = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.StableDiffusionInpaintPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L294"}}),we=new P({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.StableDiffusionInpaintPipeline.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.StableDiffusionInpaintPipeline.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) — | |
| Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.StableDiffusionInpaintPipeline.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) — | |
| Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.StableDiffusionInpaintPipeline.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) — | |
| Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py#L805",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>torch.Tensor</code></p> | |
| `}}),ve=new dt({props:{title:"StableDiffusionPipelineOutput",local:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",headingTag:"h2"}}),xe=new P({props:{name:"class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"},{name:"nsfw_content_detected",val:": typing.Optional[typing.List[bool]]"}],parametersDescription:[{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or NumPy array of shape <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) — | |
| List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or | |
| <code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L10"}}),Ie=new dt({props:{title:"FlaxStableDiffusionInpaintPipeline",local:"diffusers.FlaxStableDiffusionInpaintPipeline",headingTag:"h2"}}),Me=new P({props:{name:"class diffusers.FlaxStableDiffusionInpaintPipeline",anchor:"diffusers.FlaxStableDiffusionInpaintPipeline",parameters:[{name:"vae",val:": FlaxAutoencoderKL"},{name:"text_encoder",val:": FlaxCLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": FlaxUNet2DConditionModel"},{name:"scheduler",val:": typing.Union[diffusers.schedulers.scheduling_ddim_flax.FlaxDDIMScheduler, diffusers.schedulers.scheduling_pndm_flax.FlaxPNDMScheduler, diffusers.schedulers.scheduling_lms_discrete_flax.FlaxLMSDiscreteScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep_flax.FlaxDPMSolverMultistepScheduler]"},{name:"safety_checker",val:": FlaxStableDiffusionSafetyChecker"},{name:"feature_extractor",val:": CLIPImageProcessor"},{name:"dtype",val:": dtype = <class 'jax.numpy.float32'>"}],parametersDescription:[{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_10101/en/api/models/autoencoderkl#diffusers.FlaxAutoencoderKL">FlaxAutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.FlaxCLIPTextModel" rel="nofollow">FlaxCLIPTextModel</a>) — | |
| Frozen text-encoder (<a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a>).`,name:"text_encoder"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| A <code>CLIPTokenizer</code> to tokenize text.`,name:"tokenizer"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.unet",description:`<strong>unet</strong> (<a href="/docs/diffusers/pr_10101/en/api/models/unet2d-cond#diffusers.FlaxUNet2DConditionModel">FlaxUNet2DConditionModel</a>) — | |
| A <code>FlaxUNet2DConditionModel</code> to denoise the encoded image latents.`,name:"unet"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_10101/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latents. Can be one of | |
| <code>FlaxDDIMScheduler</code>, <code>FlaxLMSDiscreteScheduler</code>, <code>FlaxPNDMScheduler</code>, or | |
| <code>FlaxDPMSolverMultistepScheduler</code>.`,name:"scheduler"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.safety_checker",description:`<strong>safety_checker</strong> (<code>FlaxStableDiffusionSafetyChecker</code>) — | |
| Classification module that estimates whether generated images could be considered offensive or harmful. | |
| Please refer to the <a href="https://huggingface.co/runwayml/stable-diffusion-v1-5" rel="nofollow">model card</a> for more details | |
| about a model’s potential harms.`,name:"safety_checker"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor" rel="nofollow">CLIPImageProcessor</a>) — | |
| A <code>CLIPImageProcessor</code> to extract features from generated images; used as inputs to the <code>safety_checker</code>.`,name:"feature_extractor"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py#L102"}}),se=new St({props:{warning:!0,$$slots:{default:[fa]},$$scope:{ctx:T}}}),Te=new P({props:{name:"__call__",anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__",parameters:[{name:"prompt_ids",val:": Array"},{name:"mask",val:": Array"},{name:"masked_image",val:": Array"},{name:"params",val:": typing.Union[typing.Dict, flax.core.frozen_dict.FrozenDict]"},{name:"prng_seed",val:": Array"},{name:"num_inference_steps",val:": int = 50"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"guidance_scale",val:": typing.Union[float, jax.Array] = 7.5"},{name:"latents",val:": Array = None"},{name:"neg_prompt_ids",val:": Array = None"},{name:"return_dict",val:": bool = True"},{name:"jit",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>) — | |
| The prompt or prompts to guide image generation.`,name:"prompt"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to <code>self.unet.config.sample_size * self.vae_scale_factor</code>) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to <code>self.unet.config.sample_size * self.vae_scale_factor</code>) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference. This parameter is modulated by <code>strength</code>.`,name:"num_inference_steps"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) — | |
| A higher guidance scale value encourages the model to generate images closely linked to the text | |
| <code>prompt</code> at the expense of lower image quality. Guidance scale is enabled when <code>guidance_scale > 1</code>.`,name:"guidance_scale"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>jnp.ndarray</code>, <em>optional</em>) — | |
| Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| array is generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.jit",description:`<strong>jit</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| Whether to run <code>pmap</code> versions of the generation and safety scoring functions.</p> | |
| <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> | |
| <p>This argument exists because <code>__call__</code> is not yet end-to-end pmap-able. It will be removed in a | |
| future release.</p> | |
| </div>`,name:"jit"},{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <a href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput">FlaxStableDiffusionPipelineOutput</a> instead of | |
| a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py#L394",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput" | |
| >FlaxStableDiffusionPipelineOutput</a> is | |
| returned, otherwise a <code>tuple</code> is returned where the first element is a list with the generated images | |
| and the second element is a list of <code>bool</code>s indicating whether the corresponding generated image | |
| contains “not-safe-for-work” (nsfw) content.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_10101/en/api/pipelines/stable_diffusion/inpaint#diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput" | |
| >FlaxStableDiffusionPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),ie=new lt({props:{anchor:"diffusers.FlaxStableDiffusionInpaintPipeline.__call__.example",$$slots:{default:[ma]},$$scope:{ctx:T}}}),$e=new dt({props:{title:"FlaxStableDiffusionPipelineOutput",local:"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput",headingTag:"h2"}}),Je=new P({props:{name:"class diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput",anchor:"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput",parameters:[{name:"images",val:": ndarray"},{name:"nsfw_content_detected",val:": typing.List[bool]"}],parametersDescription:[{anchor:"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>np.ndarray</code>) — | |
| Denoised images of array shape of <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) — | |
| List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content | |
| or <code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L31"}}),ke=new P({props:{name:"replace",anchor:"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput.replace",parameters:[{name:"**updates",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/flax/struct.py#L144"}}),Se=new aa({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/stable_diffusion/inpaint.md"}}),{c(){n=d("meta"),I=s(),l=d("p"),o=s(),u(m.$$.fragment),t=s(),v=d("p"),v.textContent=Mn,ft=s(),u(re.$$.fragment),mt=s(),le=d("p"),le.innerHTML=Tn,ut=s(),u(V.$$.fragment),gt=s(),u(de.$$.fragment),ht=s(),x=d("div"),u(pe.$$.fragment),Pt=s(),Ze=d("p"),Ze.textContent=$n,jt=s(),Ce=d("p"),Ce.innerHTML=Jn,Dt=s(),We=d("p"),We.textContent=kn,Ut=s(),Le=d("ul"),Le.innerHTML=Sn,Zt=s(),N=d("div"),u(ce.$$.fragment),Ct=s(),Fe=d("p"),Fe.textContent=Pn,Wt=s(),u(E.$$.fragment),Lt=s(),Z=d("div"),u(fe.$$.fragment),Ft=s(),Ne=d("p"),Ne.textContent=jn,Nt=s(),u(Y.$$.fragment),Gt=s(),u(H.$$.fragment),Bt=s(),A=d("div"),u(me.$$.fragment),Xt=s(),Ge=d("p"),Ge.innerHTML=Dn,zt=s(),C=d("div"),u(ue.$$.fragment),Rt=s(),Be=d("p"),Be.innerHTML=Un,Vt=s(),u(Q.$$.fragment),Et=s(),u(q.$$.fragment),Yt=s(),O=d("div"),u(ge.$$.fragment),Ht=s(),Xe=d("p"),Xe.innerHTML=Zn,At=s(),k=d("div"),u(he.$$.fragment),Qt=s(),ze=d("p"),ze.innerHTML=Cn,qt=s(),Re=d("p"),Re.textContent=Wn,Ot=s(),u(K.$$.fragment),Kt=s(),Ve=d("p"),Ve.innerHTML=Ln,en=s(),u(ee.$$.fragment),tn=s(),S=d("div"),u(_e.$$.fragment),nn=s(),Ee=d("p"),Ee.innerHTML=Fn,an=s(),Ye=d("p"),Ye.innerHTML=Nn,sn=s(),He=d("p"),He.innerHTML=Gn,on=s(),Ae=d("p"),Ae.innerHTML=Bn,rn=s(),Qe=d("p"),Qe.innerHTML=Xn,ln=s(),te=d("div"),u(be.$$.fragment),dn=s(),qe=d("p"),qe.textContent=zn,pn=s(),ne=d("div"),u(ye.$$.fragment),cn=s(),Oe=d("p"),Oe.textContent=Rn,fn=s(),ae=d("div"),u(we.$$.fragment),mn=s(),Ke=d("p"),Ke.innerHTML=Vn,_t=s(),u(ve.$$.fragment),bt=s(),z=d("div"),u(xe.$$.fragment),un=s(),et=d("p"),et.textContent=En,yt=s(),u(Ie.$$.fragment),wt=s(),j=d("div"),u(Me.$$.fragment),gn=s(),tt=d("p"),tt.textContent=Yn,hn=s(),u(se.$$.fragment),_n=s(),nt=d("p"),nt.innerHTML=Hn,bn=s(),G=d("div"),u(Te.$$.fragment),yn=s(),at=d("p"),at.textContent=An,wn=s(),u(ie.$$.fragment),vt=s(),u($e.$$.fragment),xt=s(),W=d("div"),u(Je.$$.fragment),vn=s(),st=d("p"),st.textContent=Qn,xn=s(),oe=d("div"),u(ke.$$.fragment),In=s(),it=d("p"),it.textContent=qn,It=s(),u(Se.$$.fragment),Mt=s(),ct=d("p"),this.h()},l(e){const c=na("svelte-u9bgzb",document.head);n=p(c,"META",{name:!0,content:!0}),c.forEach(r),I=i(e),l=p(e,"P",{}),$(l).forEach(r),o=i(e),g(m.$$.fragment,e),t=i(e),v=p(e,"P",{"data-svelte-h":!0}),w(v)!=="svelte-duffv7"&&(v.textContent=Mn),ft=i(e),g(re.$$.fragment,e),mt=i(e),le=p(e,"P",{"data-svelte-h":!0}),w(le)!=="svelte-1q68sgx"&&(le.innerHTML=Tn),ut=i(e),g(V.$$.fragment,e),gt=i(e),g(de.$$.fragment,e),ht=i(e),x=p(e,"DIV",{class:!0});var M=$(x);g(pe.$$.fragment,M),Pt=i(M),Ze=p(M,"P",{"data-svelte-h":!0}),w(Ze)!=="svelte-80nqwb"&&(Ze.textContent=$n),jt=i(M),Ce=p(M,"P",{"data-svelte-h":!0}),w(Ce)!=="svelte-d6eqcp"&&(Ce.innerHTML=Jn),Dt=i(M),We=p(M,"P",{"data-svelte-h":!0}),w(We)!=="svelte-14s6m4u"&&(We.textContent=kn),Ut=i(M),Le=p(M,"UL",{"data-svelte-h":!0}),w(Le)!=="svelte-fno55m"&&(Le.innerHTML=Sn),Zt=i(M),N=p(M,"DIV",{class:!0});var R=$(N);g(ce.$$.fragment,R),Ct=i(R),Fe=p(R,"P",{"data-svelte-h":!0}),w(Fe)!=="svelte-50j04k"&&(Fe.textContent=Pn),Wt=i(R),g(E.$$.fragment,R),R.forEach(r),Lt=i(M),Z=p(M,"DIV",{class:!0});var L=$(Z);g(fe.$$.fragment,L),Ft=i(L),Ne=p(L,"P",{"data-svelte-h":!0}),w(Ne)!=="svelte-10jaql7"&&(Ne.textContent=jn),Nt=i(L),g(Y.$$.fragment,L),Gt=i(L),g(H.$$.fragment,L),L.forEach(r),Bt=i(M),A=p(M,"DIV",{class:!0});var Pe=$(A);g(me.$$.fragment,Pe),Xt=i(Pe),Ge=p(Pe,"P",{"data-svelte-h":!0}),w(Ge)!=="svelte-1lh0nh5"&&(Ge.innerHTML=Dn),Pe.forEach(r),zt=i(M),C=p(M,"DIV",{class:!0});var F=$(C);g(ue.$$.fragment,F),Rt=i(F),Be=p(F,"P",{"data-svelte-h":!0}),w(Be)!=="svelte-e03q3e"&&(Be.innerHTML=Un),Vt=i(F),g(Q.$$.fragment,F),Et=i(F),g(q.$$.fragment,F),F.forEach(r),Yt=i(M),O=p(M,"DIV",{class:!0});var je=$(O);g(ge.$$.fragment,je),Ht=i(je),Xe=p(je,"P",{"data-svelte-h":!0}),w(Xe)!=="svelte-1vfte1e"&&(Xe.innerHTML=Zn),je.forEach(r),At=i(M),k=p(M,"DIV",{class:!0});var D=$(k);g(he.$$.fragment,D),Qt=i(D),ze=p(D,"P",{"data-svelte-h":!0}),w(ze)!=="svelte-yloxl5"&&(ze.innerHTML=Cn),qt=i(D),Re=p(D,"P",{"data-svelte-h":!0}),w(Re)!=="svelte-11lpom8"&&(Re.textContent=Wn),Ot=i(D),g(K.$$.fragment,D),Kt=i(D),Ve=p(D,"P",{"data-svelte-h":!0}),w(Ve)!=="svelte-15d7mv5"&&(Ve.innerHTML=Ln),en=i(D),g(ee.$$.fragment,D),D.forEach(r),tn=i(M),S=p(M,"DIV",{class:!0});var U=$(S);g(_e.$$.fragment,U),nn=i(U),Ee=p(U,"P",{"data-svelte-h":!0}),w(Ee)!=="svelte-vs7s0z"&&(Ee.innerHTML=Fn),an=i(U),Ye=p(U,"P",{"data-svelte-h":!0}),w(Ye)!=="svelte-15b960v"&&(Ye.innerHTML=Nn),sn=i(U),He=p(U,"P",{"data-svelte-h":!0}),w(He)!=="svelte-1afgu91"&&(He.innerHTML=Gn),on=i(U),Ae=p(U,"P",{"data-svelte-h":!0}),w(Ae)!=="svelte-196325u"&&(Ae.innerHTML=Bn),rn=i(U),Qe=p(U,"P",{"data-svelte-h":!0}),w(Qe)!=="svelte-10d53ug"&&(Qe.innerHTML=Xn),U.forEach(r),ln=i(M),te=p(M,"DIV",{class:!0});var De=$(te);g(be.$$.fragment,De),dn=i(De),qe=p(De,"P",{"data-svelte-h":!0}),w(qe)!=="svelte-1ufq5ot"&&(qe.textContent=zn),De.forEach(r),pn=i(M),ne=p(M,"DIV",{class:!0});var Ue=$(ne);g(ye.$$.fragment,Ue),cn=i(Ue),Oe=p(Ue,"P",{"data-svelte-h":!0}),w(Oe)!=="svelte-16q0ax1"&&(Oe.textContent=Rn),Ue.forEach(r),fn=i(M),ae=p(M,"DIV",{class:!0});var $t=$(ae);g(we.$$.fragment,$t),mn=i($t),Ke=p($t,"P",{"data-svelte-h":!0}),w(Ke)!=="svelte-vo59ec"&&(Ke.innerHTML=Vn),$t.forEach(r),M.forEach(r),_t=i(e),g(ve.$$.fragment,e),bt=i(e),z=p(e,"DIV",{class:!0});var Jt=$(z);g(xe.$$.fragment,Jt),un=i(Jt),et=p(Jt,"P",{"data-svelte-h":!0}),w(et)!=="svelte-1qpjiuf"&&(et.textContent=En),Jt.forEach(r),yt=i(e),g(Ie.$$.fragment,e),wt=i(e),j=p(e,"DIV",{class:!0});var B=$(j);g(Me.$$.fragment,B),gn=i(B),tt=p(B,"P",{"data-svelte-h":!0}),w(tt)!=="svelte-1ieif9y"&&(tt.textContent=Yn),hn=i(B),g(se.$$.fragment,B),_n=i(B),nt=p(B,"P",{"data-svelte-h":!0}),w(nt)!=="svelte-1czipw9"&&(nt.innerHTML=Hn),bn=i(B),G=p(B,"DIV",{class:!0});var ot=$(G);g(Te.$$.fragment,ot),yn=i(ot),at=p(ot,"P",{"data-svelte-h":!0}),w(at)!=="svelte-v78lg8"&&(at.textContent=An),wn=i(ot),g(ie.$$.fragment,ot),ot.forEach(r),B.forEach(r),vt=i(e),g($e.$$.fragment,e),xt=i(e),W=p(e,"DIV",{class:!0});var rt=$(W);g(Je.$$.fragment,rt),vn=i(rt),st=p(rt,"P",{"data-svelte-h":!0}),w(st)!=="svelte-wzv2jc"&&(st.textContent=Qn),xn=i(rt),oe=p(rt,"DIV",{class:!0});var kt=$(oe);g(ke.$$.fragment,kt),In=i(kt),it=p(kt,"P",{"data-svelte-h":!0}),w(it)!=="svelte-brjshm"&&(it.textContent=qn),kt.forEach(r),rt.forEach(r),It=i(e),g(Se.$$.fragment,e),Mt=i(e),ct=p(e,"P",{}),$(ct).forEach(r),this.h()},h(){J(n,"name","hf:doc:metadata"),J(n,"content",ga),J(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),J(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,c){a(document.head,n),f(e,I,c),f(e,l,c),f(e,o,c),h(m,e,c),f(e,t,c),f(e,v,c),f(e,ft,c),h(re,e,c),f(e,mt,c),f(e,le,c),f(e,ut,c),h(V,e,c),f(e,gt,c),h(de,e,c),f(e,ht,c),f(e,x,c),h(pe,x,null),a(x,Pt),a(x,Ze),a(x,jt),a(x,Ce),a(x,Dt),a(x,We),a(x,Ut),a(x,Le),a(x,Zt),a(x,N),h(ce,N,null),a(N,Ct),a(N,Fe),a(N,Wt),h(E,N,null),a(x,Lt),a(x,Z),h(fe,Z,null),a(Z,Ft),a(Z,Ne),a(Z,Nt),h(Y,Z,null),a(Z,Gt),h(H,Z,null),a(x,Bt),a(x,A),h(me,A,null),a(A,Xt),a(A,Ge),a(x,zt),a(x,C),h(ue,C,null),a(C,Rt),a(C,Be),a(C,Vt),h(Q,C,null),a(C,Et),h(q,C,null),a(x,Yt),a(x,O),h(ge,O,null),a(O,Ht),a(O,Xe),a(x,At),a(x,k),h(he,k,null),a(k,Qt),a(k,ze),a(k,qt),a(k,Re),a(k,Ot),h(K,k,null),a(k,Kt),a(k,Ve),a(k,en),h(ee,k,null),a(x,tn),a(x,S),h(_e,S,null),a(S,nn),a(S,Ee),a(S,an),a(S,Ye),a(S,sn),a(S,He),a(S,on),a(S,Ae),a(S,rn),a(S,Qe),a(x,ln),a(x,te),h(be,te,null),a(te,dn),a(te,qe),a(x,pn),a(x,ne),h(ye,ne,null),a(ne,cn),a(ne,Oe),a(x,fn),a(x,ae),h(we,ae,null),a(ae,mn),a(ae,Ke),f(e,_t,c),h(ve,e,c),f(e,bt,c),f(e,z,c),h(xe,z,null),a(z,un),a(z,et),f(e,yt,c),h(Ie,e,c),f(e,wt,c),f(e,j,c),h(Me,j,null),a(j,gn),a(j,tt),a(j,hn),h(se,j,null),a(j,_n),a(j,nt),a(j,bn),a(j,G),h(Te,G,null),a(G,yn),a(G,at),a(G,wn),h(ie,G,null),f(e,vt,c),h($e,e,c),f(e,xt,c),f(e,W,c),h(Je,W,null),a(W,vn),a(W,st),a(W,xn),a(W,oe),h(ke,oe,null),a(oe,In),a(oe,it),f(e,It,c),h(Se,e,c),f(e,Mt,c),f(e,ct,c),Tt=!0},p(e,[c]){const M={};c&2&&(M.$$scope={dirty:c,ctx:e}),V.$set(M);const R={};c&2&&(R.$$scope={dirty:c,ctx:e}),E.$set(R);const L={};c&2&&(L.$$scope={dirty:c,ctx:e}),Y.$set(L);const Pe={};c&2&&(Pe.$$scope={dirty:c,ctx:e}),H.$set(Pe);const F={};c&2&&(F.$$scope={dirty:c,ctx:e}),Q.$set(F);const je={};c&2&&(je.$$scope={dirty:c,ctx:e}),q.$set(je);const D={};c&2&&(D.$$scope={dirty:c,ctx:e}),K.$set(D);const U={};c&2&&(U.$$scope={dirty:c,ctx:e}),ee.$set(U);const De={};c&2&&(De.$$scope={dirty:c,ctx:e}),se.$set(De);const Ue={};c&2&&(Ue.$$scope={dirty:c,ctx:e}),ie.$set(Ue)},i(e){Tt||(_(m.$$.fragment,e),_(re.$$.fragment,e),_(V.$$.fragment,e),_(de.$$.fragment,e),_(pe.$$.fragment,e),_(ce.$$.fragment,e),_(E.$$.fragment,e),_(fe.$$.fragment,e),_(Y.$$.fragment,e),_(H.$$.fragment,e),_(me.$$.fragment,e),_(ue.$$.fragment,e),_(Q.$$.fragment,e),_(q.$$.fragment,e),_(ge.$$.fragment,e),_(he.$$.fragment,e),_(K.$$.fragment,e),_(ee.$$.fragment,e),_(_e.$$.fragment,e),_(be.$$.fragment,e),_(ye.$$.fragment,e),_(we.$$.fragment,e),_(ve.$$.fragment,e),_(xe.$$.fragment,e),_(Ie.$$.fragment,e),_(Me.$$.fragment,e),_(se.$$.fragment,e),_(Te.$$.fragment,e),_(ie.$$.fragment,e),_($e.$$.fragment,e),_(Je.$$.fragment,e),_(ke.$$.fragment,e),_(Se.$$.fragment,e),Tt=!0)},o(e){b(m.$$.fragment,e),b(re.$$.fragment,e),b(V.$$.fragment,e),b(de.$$.fragment,e),b(pe.$$.fragment,e),b(ce.$$.fragment,e),b(E.$$.fragment,e),b(fe.$$.fragment,e),b(Y.$$.fragment,e),b(H.$$.fragment,e),b(me.$$.fragment,e),b(ue.$$.fragment,e),b(Q.$$.fragment,e),b(q.$$.fragment,e),b(ge.$$.fragment,e),b(he.$$.fragment,e),b(K.$$.fragment,e),b(ee.$$.fragment,e),b(_e.$$.fragment,e),b(be.$$.fragment,e),b(ye.$$.fragment,e),b(we.$$.fragment,e),b(ve.$$.fragment,e),b(xe.$$.fragment,e),b(Ie.$$.fragment,e),b(Me.$$.fragment,e),b(se.$$.fragment,e),b(Te.$$.fragment,e),b(ie.$$.fragment,e),b($e.$$.fragment,e),b(Je.$$.fragment,e),b(ke.$$.fragment,e),b(Se.$$.fragment,e),Tt=!1},d(e){e&&(r(I),r(l),r(o),r(t),r(v),r(ft),r(mt),r(le),r(ut),r(gt),r(ht),r(x),r(_t),r(bt),r(z),r(yt),r(wt),r(j),r(vt),r(xt),r(W),r(It),r(Mt),r(ct)),r(n),y(m,e),y(re,e),y(V,e),y(de,e),y(pe),y(ce),y(E),y(fe),y(Y),y(H),y(me),y(ue),y(Q),y(q),y(ge),y(he),y(K),y(ee),y(_e),y(be),y(ye),y(we),y(ve,e),y(xe),y(Ie,e),y(Me),y(se),y(Te),y(ie),y($e,e),y(Je),y(ke),y(Se,e)}}}const ga='{"title":"Inpainting","local":"inpainting","sections":[{"title":"Tips","local":"tips","sections":[],"depth":2},{"title":"StableDiffusionInpaintPipeline","local":"diffusers.StableDiffusionInpaintPipeline","sections":[],"depth":2},{"title":"StableDiffusionPipelineOutput","local":"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput","sections":[],"depth":2},{"title":"FlaxStableDiffusionInpaintPipeline","local":"diffusers.FlaxStableDiffusionInpaintPipeline","sections":[],"depth":2},{"title":"FlaxStableDiffusionPipelineOutput","local":"diffusers.pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput","sections":[],"depth":2}],"depth":1}';function ha(T){return Kn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Ma extends ea{constructor(n){super(),ta(this,n,ha,ua,On,{})}}export{Ma as component}; | |
Xet Storage Details
- Size:
- 93.6 kB
- Xet hash:
- a4efb4c3e2cf0aa3e6818076875a26df4ac786312bb5c0f4ff6cb92e1a717b8a
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.