Buckets:

rtrm's picture
download
raw
66.1 kB
import{s as un,o as hn,n as Y}from"../chunks/scheduler.53228c21.js";import{S as gn,i as _n,e as r,s as i,c as _,h as bn,a as l,d as c,b as a,f as X,g as b,j as d,k as L,l as o,m as h,n as y,t as v,o as w,p as x}from"../chunks/index.100fac89.js";import{C as yn}from"../chunks/CopyLLMTxtMenu.7aefc1a4.js";import{D as V}from"../chunks/Docstring.d6cb35e8.js";import{C as q}from"../chunks/CodeBlock.d30a6509.js";import{E as Q}from"../chunks/ExampleCodeBlock.a12c1377.js";import{H as bt,E as vn}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.3722da43.js";function wn(C){let n,g="If you get the error message below, you need to finetune the weights for your downstream task:",p,s,f;return s=new q({props:{code:"U29tZSUyMHdlaWdodHMlMjBvZiUyMFVOZXQyRENvbmRpdGlvbk1vZGVsJTIwd2VyZSUyMG5vdCUyMGluaXRpYWxpemVkJTIwZnJvbSUyMHRoZSUyMG1vZGVsJTIwY2hlY2twb2ludCUyMGF0JTIwc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIwYW5kJTIwYXJlJTIwbmV3bHklMjBpbml0aWFsaXplZCUyMGJlY2F1c2UlMjB0aGUlMjBzaGFwZXMlMjBkaWQlMjBub3QlMjBtYXRjaCUzQSUwQS0lMjBjb252X2luLndlaWdodCUzQSUyMGZvdW5kJTIwc2hhcGUlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDQlMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMGNoZWNrcG9pbnQlMjBhbmQlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDklMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMG1vZGVsJTIwaW5zdGFudGlhdGVkJTBBWW91JTIwc2hvdWxkJTIwcHJvYmFibHklMjBUUkFJTiUyMHRoaXMlMjBtb2RlbCUyMG9uJTIwYSUyMGRvd24tc3RyZWFtJTIwdGFzayUyMHRvJTIwYmUlMjBhYmxlJTIwdG8lMjB1c2UlMjBpdCUyMGZvciUyMHByZWRpY3Rpb25zJTIwYW5kJTIwaW5mZXJlbmNlLg==",highlighted:`Some weights of UNet2DConditionModel were not initialized from the model checkpoint <span class="hljs-built_in">at</span> stable-<span class="hljs-keyword">diffusion-v1-5/stable-diffusion-v1-5 </span><span class="hljs-keyword">and </span>are newly initialized <span class="hljs-keyword">because </span>the <span class="hljs-keyword">shapes </span><span class="hljs-keyword">did </span>not match:
- conv_in.weight: found <span class="hljs-keyword">shape </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the checkpoint <span class="hljs-keyword">and </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">9</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the model <span class="hljs-keyword">instantiated
</span>You <span class="hljs-keyword">should </span>probably TRAIN this model on a down-stream task to <span class="hljs-keyword">be </span>able to use it for predictions <span class="hljs-keyword">and </span>inference.`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-xueb0m"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function xn(C){let n,g="Examples:",p,s,f;return s=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvclRleHQySW1hZ2UlMEElMEFwaXBlbGluZSUyMCUzRCUyMEF1dG9QaXBlbGluZUZvclRleHQySW1hZ2UuZnJvbV9wcmV0cmFpbmVkKCUyMnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiklMEFpbWFnZSUyMCUzRCUyMHBpcGVsaW5lKHByb21wdCkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForText2Image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipeline = AutoPipelineForText2Image.from_pretrained(<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipeline(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-kvfsh7"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function Mn(C){let n,g;return n=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvclRleHQySW1hZ2UlMkMlMjBBdXRvUGlwZWxpbmVGb3JJbWFnZTJJbWFnZSUwQSUwQXBpcGVfaTJpJTIwJTNEJTIwQXV0b1BpcGVsaW5lRm9ySW1hZ2UySW1hZ2UuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiUyQyUyMHJlcXVpcmVzX3NhZmV0eV9jaGVja2VyJTNERmFsc2UlMEEpJTBBJTBBcGlwZV90MmklMjAlM0QlMjBBdXRvUGlwZWxpbmVGb3JUZXh0MkltYWdlLmZyb21fcGlwZShwaXBlX2kyaSklMEFpbWFnZSUyMCUzRCUyMHBpcGVfdDJpKHByb21wdCkuaW1hZ2VzJTVCMCU1RA==",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForText2Image, AutoPipelineForImage2Image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>, requires_safety_checker=<span class="hljs-literal">False</span>
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe_t2i(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){_(n.$$.fragment)},l(p){b(n.$$.fragment,p)},m(p,s){y(n,p,s),g=!0},p:Y,i(p){g||(v(n.$$.fragment,p),g=!0)},o(p){w(n.$$.fragment,p),g=!1},d(p){x(n,p)}}}function $n(C){let n,g="If you get the error message below, you need to finetune the weights for your downstream task:",p,s,f;return s=new q({props:{code:"U29tZSUyMHdlaWdodHMlMjBvZiUyMFVOZXQyRENvbmRpdGlvbk1vZGVsJTIwd2VyZSUyMG5vdCUyMGluaXRpYWxpemVkJTIwZnJvbSUyMHRoZSUyMG1vZGVsJTIwY2hlY2twb2ludCUyMGF0JTIwc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIwYW5kJTIwYXJlJTIwbmV3bHklMjBpbml0aWFsaXplZCUyMGJlY2F1c2UlMjB0aGUlMjBzaGFwZXMlMjBkaWQlMjBub3QlMjBtYXRjaCUzQSUwQS0lMjBjb252X2luLndlaWdodCUzQSUyMGZvdW5kJTIwc2hhcGUlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDQlMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMGNoZWNrcG9pbnQlMjBhbmQlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDklMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMG1vZGVsJTIwaW5zdGFudGlhdGVkJTBBWW91JTIwc2hvdWxkJTIwcHJvYmFibHklMjBUUkFJTiUyMHRoaXMlMjBtb2RlbCUyMG9uJTIwYSUyMGRvd24tc3RyZWFtJTIwdGFzayUyMHRvJTIwYmUlMjBhYmxlJTIwdG8lMjB1c2UlMjBpdCUyMGZvciUyMHByZWRpY3Rpb25zJTIwYW5kJTIwaW5mZXJlbmNlLg==",highlighted:`Some weights of UNet2DConditionModel were not initialized from the model checkpoint <span class="hljs-built_in">at</span> stable-<span class="hljs-keyword">diffusion-v1-5/stable-diffusion-v1-5 </span><span class="hljs-keyword">and </span>are newly initialized <span class="hljs-keyword">because </span>the <span class="hljs-keyword">shapes </span><span class="hljs-keyword">did </span>not match:
- conv_in.weight: found <span class="hljs-keyword">shape </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the checkpoint <span class="hljs-keyword">and </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">9</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the model <span class="hljs-keyword">instantiated
</span>You <span class="hljs-keyword">should </span>probably TRAIN this model on a down-stream task to <span class="hljs-keyword">be </span>able to use it for predictions <span class="hljs-keyword">and </span>inference.`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-xueb0m"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function Tn(C){let n,g="Examples:",p,s,f;return s=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvckltYWdlMkltYWdlJTBBJTBBcGlwZWxpbmUlMjAlM0QlMjBBdXRvUGlwZWxpbmVGb3JJbWFnZTJJbWFnZS5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyKSUwQWltYWdlJTIwJTNEJTIwcGlwZWxpbmUocHJvbXB0JTJDJTIwaW1hZ2UpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForImage2Image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipeline = AutoPipelineForImage2Image.from_pretrained(<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipeline(prompt, image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-kvfsh7"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function In(C){let n,g="Examples:",p,s,f;return s=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvclRleHQySW1hZ2UlMkMlMjBBdXRvUGlwZWxpbmVGb3JJbWFnZTJJbWFnZSUwQSUwQXBpcGVfdDJpJTIwJTNEJTIwQXV0b1BpcGVsaW5lRm9yVGV4dDJJbWFnZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTJDJTIwcmVxdWlyZXNfc2FmZXR5X2NoZWNrZXIlM0RGYWxzZSUwQSklMEElMEFwaXBlX2kyaSUyMCUzRCUyMEF1dG9QaXBlbGluZUZvckltYWdlMkltYWdlLmZyb21fcGlwZShwaXBlX3QyaSklMEFpbWFnZSUyMCUzRCUyMHBpcGVfaTJpKHByb21wdCUyQyUyMGltYWdlKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForText2Image, AutoPipelineForImage2Image
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_t2i = AutoPipelineForText2Image.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>, requires_safety_checker=<span class="hljs-literal">False</span>
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe_i2i(prompt, image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-kvfsh7"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function kn(C){let n,g="If you get the error message below, you need to finetune the weights for your downstream task:",p,s,f;return s=new q({props:{code:"U29tZSUyMHdlaWdodHMlMjBvZiUyMFVOZXQyRENvbmRpdGlvbk1vZGVsJTIwd2VyZSUyMG5vdCUyMGluaXRpYWxpemVkJTIwZnJvbSUyMHRoZSUyMG1vZGVsJTIwY2hlY2twb2ludCUyMGF0JTIwc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIwYW5kJTIwYXJlJTIwbmV3bHklMjBpbml0aWFsaXplZCUyMGJlY2F1c2UlMjB0aGUlMjBzaGFwZXMlMjBkaWQlMjBub3QlMjBtYXRjaCUzQSUwQS0lMjBjb252X2luLndlaWdodCUzQSUyMGZvdW5kJTIwc2hhcGUlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDQlMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMGNoZWNrcG9pbnQlMjBhbmQlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDklMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMG1vZGVsJTIwaW5zdGFudGlhdGVkJTBBWW91JTIwc2hvdWxkJTIwcHJvYmFibHklMjBUUkFJTiUyMHRoaXMlMjBtb2RlbCUyMG9uJTIwYSUyMGRvd24tc3RyZWFtJTIwdGFzayUyMHRvJTIwYmUlMjBhYmxlJTIwdG8lMjB1c2UlMjBpdCUyMGZvciUyMHByZWRpY3Rpb25zJTIwYW5kJTIwaW5mZXJlbmNlLg==",highlighted:`Some weights of UNet2DConditionModel were not initialized from the model checkpoint <span class="hljs-built_in">at</span> stable-<span class="hljs-keyword">diffusion-v1-5/stable-diffusion-v1-5 </span><span class="hljs-keyword">and </span>are newly initialized <span class="hljs-keyword">because </span>the <span class="hljs-keyword">shapes </span><span class="hljs-keyword">did </span>not match:
- conv_in.weight: found <span class="hljs-keyword">shape </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the checkpoint <span class="hljs-keyword">and </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">9</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the model <span class="hljs-keyword">instantiated
</span>You <span class="hljs-keyword">should </span>probably TRAIN this model on a down-stream task to <span class="hljs-keyword">be </span>able to use it for predictions <span class="hljs-keyword">and </span>inference.`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-xueb0m"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function Un(C){let n,g="Examples:",p,s,f;return s=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvcklucGFpbnRpbmclMEElMEFwaXBlbGluZSUyMCUzRCUyMEF1dG9QaXBlbGluZUZvcklucGFpbnRpbmcuZnJvbV9wcmV0cmFpbmVkKCUyMnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiklMEFpbWFnZSUyMCUzRCUyMHBpcGVsaW5lKHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForInpainting
<span class="hljs-meta">&gt;&gt;&gt; </span>pipeline = AutoPipelineForInpainting.from_pretrained(<span class="hljs-string">&quot;stable-diffusion-v1-5/stable-diffusion-v1-5&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipeline(prompt, image=init_image, mask_image=mask_image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-kvfsh7"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function Cn(C){let n,g="Examples:",p,s,f;return s=new q({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEF1dG9QaXBlbGluZUZvclRleHQySW1hZ2UlMkMlMjBBdXRvUGlwZWxpbmVGb3JJbnBhaW50aW5nJTBBJTBBcGlwZV90MmklMjAlM0QlMjBBdXRvUGlwZWxpbmVGb3JUZXh0MkltYWdlLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJEZWVwRmxveWQlMkZJRi1JLVhMLXYxLjAlMjIlMkMlMjByZXF1aXJlc19zYWZldHlfY2hlY2tlciUzREZhbHNlJTBBKSUwQSUwQXBpcGVfaW5wYWludCUyMCUzRCUyMEF1dG9QaXBlbGluZUZvcklucGFpbnRpbmcuZnJvbV9waXBlKHBpcGVfdDJpKSUwQWltYWdlJTIwJTNEJTIwcGlwZV9pbnBhaW50KHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMG1hc2tfaW1hZ2UlM0RtYXNrX2ltYWdlKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> AutoPipelineForText2Image, AutoPipelineForInpainting
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_t2i = AutoPipelineForText2Image.from_pretrained(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;DeepFloyd/IF-I-XL-v1.0&quot;</span>, requires_safety_checker=<span class="hljs-literal">False</span>
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_t2i)
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe_inpaint(prompt, image=init_image, mask_image=mask_image).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=r("p"),n.textContent=g,p=i(),_(s.$$.fragment)},l(e){n=l(e,"P",{"data-svelte-h":!0}),d(n)!=="svelte-kvfsh7"&&(n.textContent=g),p=a(e),b(s.$$.fragment,e)},m(e,u){h(e,n,u),h(e,p,u),y(s,e,u),f=!0},p:Y,i(e){f||(v(s.$$.fragment,e),f=!0)},o(e){w(s.$$.fragment,e),f=!1},d(e){e&&(c(n),c(p)),x(s,e)}}}function Pn(C){let n,g,p,s,f,e,u,rt,ie,Mo="The <code>AutoPipeline</code> is designed to make it easy to load a checkpoint for a task without needing to know the specific pipeline class. Based on the task, the <code>AutoPipeline</code> automatically retrieves the correct pipeline class from the checkpoint <code>model_index.json</code> file.",lt,E,$o='<p>Check out the <a href="../../tutorials/autopipeline">AutoPipeline</a> tutorial to learn how to use this API!</p>',pt,ae,dt,P,se,yt,we,To=`<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForText2Image">AutoPipelineForText2Image</a> is a generic pipeline class that instantiates a text-to-image pipeline class. The
specific underlying pipeline class is automatically selected from either the
<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForText2Image.from_pretrained">from_pretrained()</a> or <a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForText2Image.from_pipe">from_pipe()</a> methods.`,vt,xe,Io="This class cannot be instantiated using <code>__init__()</code> (throws an error).",wt,Me,ko="Class attributes:",xt,$e,Uo=`<li><strong>config_name</strong> (<code>str</code>) — The configuration filename that stores the class and module names of all the
diffusion pipeline’s components.</li>`,Mt,M,re,$t,Te,Co="Instantiates a text-to-image Pytorch diffusion pipeline from pretrained pipeline weight.",Tt,Ie,Po="The from_pretrained() method takes care of returning the correct pipeline class instance by:",It,ke,jo=`<li>Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its
config object</li> <li>Find the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class
name.</li>`,kt,Ue,Fo='If a <code>controlnet</code> argument is passed, it will instantiate a <a href="/docs/diffusers/pr_12595/en/api/pipelines/controlnet#diffusers.StableDiffusionControlNetPipeline">StableDiffusionControlNetPipeline</a> object.',Ut,Ce,Jo="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",Ct,z,Pt,le,Zo=`<p>&gt; To use private or <a href="https://huggingface.co/docs/hub/models-gated#gated-models" rel="nofollow">gated</a> models, log-in
with <code>hf &gt; auth login</code>.</p>`,jt,D,Ft,G,pe,Jt,Pe,Ao="Instantiates a text-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class.",Zt,je,Go=`The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image
pipeline linked to the pipeline class using pattern matching on pipeline class name.`,At,Fe,Wo=`All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
additional memory.`,Gt,Je,Bo="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",Wt,N,ct,de,mt,j,ce,Bt,Ze,Ho=`<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image">AutoPipelineForImage2Image</a> is a generic pipeline class that instantiates an image-to-image pipeline class. The
specific underlying pipeline class is automatically selected from either the
<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image.from_pretrained">from_pretrained()</a> or <a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForImage2Image.from_pipe">from_pipe()</a> methods.`,Ht,Ae,Ro="This class cannot be instantiated using <code>__init__()</code> (throws an error).",Rt,Ge,So="Class attributes:",St,We,Lo=`<li><strong>config_name</strong> (<code>str</code>) — The configuration filename that stores the class and module names of all the
diffusion pipeline’s components.</li>`,Lt,$,me,Xt,Be,Xo="Instantiates a image-to-image Pytorch diffusion pipeline from pretrained pipeline weight.",Vt,He,Vo="The from_pretrained() method takes care of returning the correct pipeline class instance by:",Qt,Re,Qo=`<li>Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its
config object</li> <li>Find the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class
name.</li>`,Yt,Se,Yo=`If a <code>controlnet</code> argument is passed, it will instantiate a <a href="/docs/diffusers/pr_12595/en/api/pipelines/controlnet#diffusers.StableDiffusionControlNetImg2ImgPipeline">StableDiffusionControlNetImg2ImgPipeline</a>
object.`,qt,Le,qo="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",Et,K,zt,fe,Eo=`<p>&gt; To use private or <a href="https://huggingface.co/docs/hub/models-gated#gated-models" rel="nofollow">gated</a> models, log-in
with <code>hf &gt; auth login</code>.</p>`,Dt,O,Nt,W,ue,Kt,Xe,zo="Instantiates a image-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class.",Ot,Ve,Do=`The from_pipe() method takes care of returning the correct pipeline class instance by finding the
image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name.`,eo,Qe,No=`All the modules the pipeline contains will be used to initialize the new pipeline without reallocating
additional memory.`,to,Ye,Ko="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",oo,ee,ft,he,ut,F,ge,no,qe,Oo=`<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForInpainting">AutoPipelineForInpainting</a> is a generic pipeline class that instantiates an inpainting pipeline class. The
specific underlying pipeline class is automatically selected from either the
<a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForInpainting.from_pretrained">from_pretrained()</a> or <a href="/docs/diffusers/pr_12595/en/api/pipelines/auto_pipeline#diffusers.AutoPipelineForInpainting.from_pipe">from_pipe()</a> methods.`,io,Ee,en="This class cannot be instantiated using <code>__init__()</code> (throws an error).",ao,ze,tn="Class attributes:",so,De,on=`<li><strong>config_name</strong> (<code>str</code>) — The configuration filename that stores the class and module names of all the
diffusion pipeline’s components.</li>`,ro,T,_e,lo,Ne,nn="Instantiates a inpainting Pytorch diffusion pipeline from pretrained pipeline weight.",po,Ke,an="The from_pretrained() method takes care of returning the correct pipeline class instance by:",co,Oe,sn=`<li>Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its
config object</li> <li>Find the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name.</li>`,mo,et,rn=`If a <code>controlnet</code> argument is passed, it will instantiate a <a href="/docs/diffusers/pr_12595/en/api/pipelines/controlnet#diffusers.StableDiffusionControlNetInpaintPipeline">StableDiffusionControlNetInpaintPipeline</a>
object.`,fo,tt,ln="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",uo,te,ho,be,pn=`<p>&gt; To use private or <a href="https://huggingface.co/docs/hub/models-gated#gated-models" rel="nofollow">gated</a> models, log-in
with <code>hf &gt; auth login</code>.</p>`,go,oe,_o,B,ye,bo,ot,dn="Instantiates a inpainting Pytorch diffusion pipeline from another instantiated diffusion pipeline class.",yo,nt,cn=`The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting
pipeline linked to the pipeline class using pattern matching on pipeline class name.`,vo,it,mn=`All the modules the pipeline class contain will be used to initialize the new pipeline without reallocating
additional memory.`,wo,at,fn="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",xo,ne,ht,ve,gt,st,_t;return f=new yn({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),u=new bt({props:{title:"AutoPipeline",local:"autopipeline",headingTag:"h1"}}),ae=new bt({props:{title:"AutoPipelineForText2Image",local:"diffusers.AutoPipelineForText2Image",headingTag:"h2"}}),se=new V({props:{name:"class diffusers.AutoPipelineForText2Image",anchor:"diffusers.AutoPipelineForText2Image",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L289"}}),re=new V({props:{name:"from_pretrained",anchor:"diffusers.AutoPipelineForText2Image.from_pretrained",parameters:[{name:"pretrained_model_or_path",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.pretrained_model_or_path",description:`<strong>pretrained_model_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
Can be either:</p>
<ul>
<li>A string, the <em>repo id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained pipeline
hosted on the Hub.</li>
<li>A path to a <em>directory</em> (for example <code>./my_pipeline_directory/</code>) containing pipeline weights
saved using
<a href="/docs/diffusers/pr_12595/en/api/pipelines/overview#diffusers.DiffusionPipeline.save_pretrained">save_pretrained()</a>.</li>
</ul>`,name:"pretrained_model_or_path"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.torch_dtype",description:`<strong>torch_dtype</strong> (<code>torch.dtype</code>, <em>optional</em>) &#x2014;
Override the default <code>torch.dtype</code> and load the model with another dtype.`,name:"torch_dtype"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014;
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.`,name:"cache_dir"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014;
A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model
won&#x2019;t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014;
The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from
<code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.`,name:"revision"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.custom_revision",description:`<strong>custom_revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
<code>revision</code> when loading a custom pipeline from the Hub. It can be a &#x1F917; Diffusers version when loading a
custom pipeline from GitHub, otherwise it defaults to <code>&quot;main&quot;</code> when loading from the Hub.`,name:"custom_revision"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Mirror source to resolve accessibility issues if you&#x2019;re downloading a model in China. We do not
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
information.`,name:"mirror"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.device_map",description:`<strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]]</code>, <em>optional</em>) &#x2014;
A map that specifies where each submodule should go. It doesn&#x2019;t need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
same device.</p>
<p>Set <code>device_map=&quot;auto&quot;</code> to have &#x1F917; Accelerate automatically compute the most optimized <code>device_map</code>. For
more information about each option see <a href="https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map" rel="nofollow">designing a device
map</a>.`,name:"device_map"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.max_memory",description:`<strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) &#x2014;
A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
each GPU and the available CPU RAM if unset.`,name:"max_memory"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.offload_folder",description:`<strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
The path to offload weights if device_map contains the value <code>&quot;disk&quot;</code>.`,name:"offload_folder"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.offload_state_dict",description:`<strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
If <code>True</code>, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to <code>True</code>
when there is some disk offload.`,name:"offload_state_dict"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version &gt;= 1.9.0 else <code>False</code>) &#x2014;
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
Only supported for PyTorch &gt;= 1.9.0. If you are using an older version of PyTorch, setting this
argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.use_safetensors",description:`<strong>use_safetensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
If set to <code>None</code>, the safetensors weights are downloaded if they&#x2019;re available <strong>and</strong> if the
safetensors library is installed. If set to <code>True</code>, the model is forcibly loaded from safetensors
weights. If set to <code>False</code>, safetensors weights are not loaded.`,name:"use_safetensors"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014;
Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
class). The overwritten components are passed directly to the pipelines <code>__init__</code> method. See example
below for more information.`,name:"kwargs"},{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Load weights from a specified variant filename such as <code>&quot;fp16&quot;</code> or <code>&quot;ema&quot;</code>. This is ignored when
loading <code>from_flax</code>.`,name:"variant"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L314"}}),z=new Q({props:{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.example",$$slots:{default:[wn]},$$scope:{ctx:C}}}),D=new Q({props:{anchor:"diffusers.AutoPipelineForText2Image.from_pretrained.example-2",$$slots:{default:[xn]},$$scope:{ctx:C}}}),pe=new V({props:{name:"from_pipe",anchor:"diffusers.AutoPipelineForText2Image.from_pipe",parameters:[{name:"pipeline",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForText2Image.from_pipe.pipeline",description:`<strong>pipeline</strong> (<code>DiffusionPipeline</code>) &#x2014;
an instantiated <code>DiffusionPipeline</code> object`,name:"pipeline"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L462"}}),N=new Q({props:{anchor:"diffusers.AutoPipelineForText2Image.from_pipe.example",$$slots:{default:[Mn]},$$scope:{ctx:C}}}),de=new bt({props:{title:"AutoPipelineForImage2Image",local:"diffusers.AutoPipelineForImage2Image",headingTag:"h2"}}),ce=new V({props:{name:"class diffusers.AutoPipelineForImage2Image",anchor:"diffusers.AutoPipelineForImage2Image",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L579"}}),me=new V({props:{name:"from_pretrained",anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained",parameters:[{name:"pretrained_model_or_path",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.pretrained_model_or_path",description:`<strong>pretrained_model_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
Can be either:</p>
<ul>
<li>A string, the <em>repo id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained pipeline
hosted on the Hub.</li>
<li>A path to a <em>directory</em> (for example <code>./my_pipeline_directory/</code>) containing pipeline weights
saved using
<a href="/docs/diffusers/pr_12595/en/api/pipelines/overview#diffusers.DiffusionPipeline.save_pretrained">save_pretrained()</a>.</li>
</ul>`,name:"pretrained_model_or_path"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.torch_dtype",description:`<strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014;
Override the default <code>torch.dtype</code> and load the model with another dtype.`,name:"torch_dtype"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014;
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.`,name:"cache_dir"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014;
A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model
won&#x2019;t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014;
The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from
<code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.`,name:"revision"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.custom_revision",description:`<strong>custom_revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
<code>revision</code> when loading a custom pipeline from the Hub. It can be a &#x1F917; Diffusers version when loading a
custom pipeline from GitHub, otherwise it defaults to <code>&quot;main&quot;</code> when loading from the Hub.`,name:"custom_revision"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Mirror source to resolve accessibility issues if you&#x2019;re downloading a model in China. We do not
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
information.`,name:"mirror"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.device_map",description:`<strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]]</code>, <em>optional</em>) &#x2014;
A map that specifies where each submodule should go. It doesn&#x2019;t need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
same device.</p>
<p>Set <code>device_map=&quot;auto&quot;</code> to have &#x1F917; Accelerate automatically compute the most optimized <code>device_map</code>. For
more information about each option see <a href="https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map" rel="nofollow">designing a device
map</a>.`,name:"device_map"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.max_memory",description:`<strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) &#x2014;
A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
each GPU and the available CPU RAM if unset.`,name:"max_memory"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.offload_folder",description:`<strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
The path to offload weights if device_map contains the value <code>&quot;disk&quot;</code>.`,name:"offload_folder"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.offload_state_dict",description:`<strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
If <code>True</code>, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to <code>True</code>
when there is some disk offload.`,name:"offload_state_dict"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version &gt;= 1.9.0 else <code>False</code>) &#x2014;
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
Only supported for PyTorch &gt;= 1.9.0. If you are using an older version of PyTorch, setting this
argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.use_safetensors",description:`<strong>use_safetensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
If set to <code>None</code>, the safetensors weights are downloaded if they&#x2019;re available <strong>and</strong> if the
safetensors library is installed. If set to <code>True</code>, the model is forcibly loaded from safetensors
weights. If set to <code>False</code>, safetensors weights are not loaded.`,name:"use_safetensors"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014;
Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
class). The overwritten components are passed directly to the pipelines <code>__init__</code> method. See example
below for more information.`,name:"kwargs"},{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Load weights from a specified variant filename such as <code>&quot;fp16&quot;</code> or <code>&quot;ema&quot;</code>. This is ignored when
loading <code>from_flax</code>.`,name:"variant"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L604"}}),K=new Q({props:{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.example",$$slots:{default:[$n]},$$scope:{ctx:C}}}),O=new Q({props:{anchor:"diffusers.AutoPipelineForImage2Image.from_pretrained.example-2",$$slots:{default:[Tn]},$$scope:{ctx:C}}}),ue=new V({props:{name:"from_pipe",anchor:"diffusers.AutoPipelineForImage2Image.from_pipe",parameters:[{name:"pipeline",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForImage2Image.from_pipe.pipeline",description:`<strong>pipeline</strong> (<code>DiffusionPipeline</code>) &#x2014;
an instantiated <code>DiffusionPipeline</code> object`,name:"pipeline"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L763"}}),ee=new Q({props:{anchor:"diffusers.AutoPipelineForImage2Image.from_pipe.example",$$slots:{default:[In]},$$scope:{ctx:C}}}),he=new bt({props:{title:"AutoPipelineForInpainting",local:"diffusers.AutoPipelineForInpainting",headingTag:"h2"}}),ge=new V({props:{name:"class diffusers.AutoPipelineForInpainting",anchor:"diffusers.AutoPipelineForInpainting",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L886"}}),_e=new V({props:{name:"from_pretrained",anchor:"diffusers.AutoPipelineForInpainting.from_pretrained",parameters:[{name:"pretrained_model_or_path",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.pretrained_model_or_path",description:`<strong>pretrained_model_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
Can be either:</p>
<ul>
<li>A string, the <em>repo id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained pipeline
hosted on the Hub.</li>
<li>A path to a <em>directory</em> (for example <code>./my_pipeline_directory/</code>) containing pipeline weights
saved using
<a href="/docs/diffusers/pr_12595/en/api/pipelines/overview#diffusers.DiffusionPipeline.save_pretrained">save_pretrained()</a>.</li>
</ul>`,name:"pretrained_model_or_path"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.torch_dtype",description:`<strong>torch_dtype</strong> (<code>str</code> or <code>torch.dtype</code>, <em>optional</em>) &#x2014;
Override the default <code>torch.dtype</code> and load the model with another dtype.`,name:"torch_dtype"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) &#x2014;
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
is not used.`,name:"cache_dir"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) &#x2014;
A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{&apos;http&apos;: &apos;foo.bar:3128&apos;, &apos;http://hostname&apos;: &apos;foo.bar:4012&apos;}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model
won&#x2019;t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) &#x2014;
The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from
<code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.`,name:"revision"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.custom_revision",description:`<strong>custom_revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;main&quot;</code>) &#x2014;
The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
<code>revision</code> when loading a custom pipeline from the Hub. It can be a &#x1F917; Diffusers version when loading a
custom pipeline from GitHub, otherwise it defaults to <code>&quot;main&quot;</code> when loading from the Hub.`,name:"custom_revision"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Mirror source to resolve accessibility issues if you&#x2019;re downloading a model in China. We do not
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
information.`,name:"mirror"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.device_map",description:`<strong>device_map</strong> (<code>str</code> or <code>Dict[str, Union[int, str, torch.device]]</code>, <em>optional</em>) &#x2014;
A map that specifies where each submodule should go. It doesn&#x2019;t need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
same device.</p>
<p>Set <code>device_map=&quot;auto&quot;</code> to have &#x1F917; Accelerate automatically compute the most optimized <code>device_map</code>. For
more information about each option see <a href="https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map" rel="nofollow">designing a device
map</a>.`,name:"device_map"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.max_memory",description:`<strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) &#x2014;
A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
each GPU and the available CPU RAM if unset.`,name:"max_memory"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.offload_folder",description:`<strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) &#x2014;
The path to offload weights if device_map contains the value <code>&quot;disk&quot;</code>.`,name:"offload_folder"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.offload_state_dict",description:`<strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) &#x2014;
If <code>True</code>, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to <code>True</code>
when there is some disk offload.`,name:"offload_state_dict"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version &gt;= 1.9.0 else <code>False</code>) &#x2014;
Speed up model loading only loading the pretrained weights and not initializing the weights. This also
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
Only supported for PyTorch &gt;= 1.9.0. If you are using an older version of PyTorch, setting this
argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.use_safetensors",description:`<strong>use_safetensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
If set to <code>None</code>, the safetensors weights are downloaded if they&#x2019;re available <strong>and</strong> if the
safetensors library is installed. If set to <code>True</code>, the model is forcibly loaded from safetensors
weights. If set to <code>False</code>, safetensors weights are not loaded.`,name:"use_safetensors"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) &#x2014;
Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
class). The overwritten components are passed directly to the pipelines <code>__init__</code> method. See example
below for more information.`,name:"kwargs"},{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Load weights from a specified variant filename such as <code>&quot;fp16&quot;</code> or <code>&quot;ema&quot;</code>. This is ignored when
loading <code>from_flax</code>.`,name:"variant"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L911"}}),te=new Q({props:{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.example",$$slots:{default:[kn]},$$scope:{ctx:C}}}),oe=new Q({props:{anchor:"diffusers.AutoPipelineForInpainting.from_pretrained.example-2",$$slots:{default:[Un]},$$scope:{ctx:C}}}),ye=new V({props:{name:"from_pipe",anchor:"diffusers.AutoPipelineForInpainting.from_pipe",parameters:[{name:"pipeline",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.AutoPipelineForInpainting.from_pipe.pipeline",description:`<strong>pipeline</strong> (<code>DiffusionPipeline</code>) &#x2014;
an instantiated <code>DiffusionPipeline</code> object`,name:"pipeline"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/auto_pipeline.py#L1067"}}),ne=new Q({props:{anchor:"diffusers.AutoPipelineForInpainting.from_pipe.example",$$slots:{default:[Cn]},$$scope:{ctx:C}}}),ve=new vn({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/auto_pipeline.md"}}),{c(){n=r("meta"),g=i(),p=r("p"),s=i(),_(f.$$.fragment),e=i(),_(u.$$.fragment),rt=i(),ie=r("p"),ie.innerHTML=Mo,lt=i(),E=r("blockquote"),E.innerHTML=$o,pt=i(),_(ae.$$.fragment),dt=i(),P=r("div"),_(se.$$.fragment),yt=i(),we=r("p"),we.innerHTML=To,vt=i(),xe=r("p"),xe.innerHTML=Io,wt=i(),Me=r("p"),Me.textContent=ko,xt=i(),$e=r("ul"),$e.innerHTML=Uo,Mt=i(),M=r("div"),_(re.$$.fragment),$t=i(),Te=r("p"),Te.textContent=Co,Tt=i(),Ie=r("p"),Ie.textContent=Po,It=i(),ke=r("ol"),ke.innerHTML=jo,kt=i(),Ue=r("p"),Ue.innerHTML=Fo,Ut=i(),Ce=r("p"),Ce.innerHTML=Jo,Ct=i(),_(z.$$.fragment),Pt=i(),le=r("blockquote"),le.innerHTML=Zo,jt=i(),_(D.$$.fragment),Ft=i(),G=r("div"),_(pe.$$.fragment),Jt=i(),Pe=r("p"),Pe.textContent=Ao,Zt=i(),je=r("p"),je.textContent=Go,At=i(),Fe=r("p"),Fe.textContent=Wo,Gt=i(),Je=r("p"),Je.innerHTML=Bo,Wt=i(),_(N.$$.fragment),ct=i(),_(de.$$.fragment),mt=i(),j=r("div"),_(ce.$$.fragment),Bt=i(),Ze=r("p"),Ze.innerHTML=Ho,Ht=i(),Ae=r("p"),Ae.innerHTML=Ro,Rt=i(),Ge=r("p"),Ge.textContent=So,St=i(),We=r("ul"),We.innerHTML=Lo,Lt=i(),$=r("div"),_(me.$$.fragment),Xt=i(),Be=r("p"),Be.textContent=Xo,Vt=i(),He=r("p"),He.textContent=Vo,Qt=i(),Re=r("ol"),Re.innerHTML=Qo,Yt=i(),Se=r("p"),Se.innerHTML=Yo,qt=i(),Le=r("p"),Le.innerHTML=qo,Et=i(),_(K.$$.fragment),zt=i(),fe=r("blockquote"),fe.innerHTML=Eo,Dt=i(),_(O.$$.fragment),Nt=i(),W=r("div"),_(ue.$$.fragment),Kt=i(),Xe=r("p"),Xe.textContent=zo,Ot=i(),Ve=r("p"),Ve.textContent=Do,eo=i(),Qe=r("p"),Qe.textContent=No,to=i(),Ye=r("p"),Ye.innerHTML=Ko,oo=i(),_(ee.$$.fragment),ft=i(),_(he.$$.fragment),ut=i(),F=r("div"),_(ge.$$.fragment),no=i(),qe=r("p"),qe.innerHTML=Oo,io=i(),Ee=r("p"),Ee.innerHTML=en,ao=i(),ze=r("p"),ze.textContent=tn,so=i(),De=r("ul"),De.innerHTML=on,ro=i(),T=r("div"),_(_e.$$.fragment),lo=i(),Ne=r("p"),Ne.textContent=nn,po=i(),Ke=r("p"),Ke.textContent=an,co=i(),Oe=r("ol"),Oe.innerHTML=sn,mo=i(),et=r("p"),et.innerHTML=rn,fo=i(),tt=r("p"),tt.innerHTML=ln,uo=i(),_(te.$$.fragment),ho=i(),be=r("blockquote"),be.innerHTML=pn,go=i(),_(oe.$$.fragment),_o=i(),B=r("div"),_(ye.$$.fragment),bo=i(),ot=r("p"),ot.textContent=dn,yo=i(),nt=r("p"),nt.textContent=cn,vo=i(),it=r("p"),it.textContent=mn,wo=i(),at=r("p"),at.innerHTML=fn,xo=i(),_(ne.$$.fragment),ht=i(),_(ve.$$.fragment),gt=i(),st=r("p"),this.h()},l(t){const m=bn("svelte-u9bgzb",document.head);n=l(m,"META",{name:!0,content:!0}),m.forEach(c),g=a(t),p=l(t,"P",{}),X(p).forEach(c),s=a(t),b(f.$$.fragment,t),e=a(t),b(u.$$.fragment,t),rt=a(t),ie=l(t,"P",{"data-svelte-h":!0}),d(ie)!=="svelte-10ft6k8"&&(ie.innerHTML=Mo),lt=a(t),E=l(t,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(E)!=="svelte-1riqrnp"&&(E.innerHTML=$o),pt=a(t),b(ae.$$.fragment,t),dt=a(t),P=l(t,"DIV",{class:!0});var J=X(P);b(se.$$.fragment,J),yt=a(J),we=l(J,"P",{"data-svelte-h":!0}),d(we)!=="svelte-1sg601g"&&(we.innerHTML=To),vt=a(J),xe=l(J,"P",{"data-svelte-h":!0}),d(xe)!=="svelte-13laq45"&&(xe.innerHTML=Io),wt=a(J),Me=l(J,"P",{"data-svelte-h":!0}),d(Me)!=="svelte-wz8va1"&&(Me.textContent=ko),xt=a(J),$e=l(J,"UL",{"data-svelte-h":!0}),d($e)!=="svelte-56flce"&&($e.innerHTML=Uo),Mt=a(J),M=l(J,"DIV",{class:!0});var I=X(M);b(re.$$.fragment,I),$t=a(I),Te=l(I,"P",{"data-svelte-h":!0}),d(Te)!=="svelte-73gcl"&&(Te.textContent=Co),Tt=a(I),Ie=l(I,"P",{"data-svelte-h":!0}),d(Ie)!=="svelte-3eelph"&&(Ie.textContent=Po),It=a(I),ke=l(I,"OL",{"data-svelte-h":!0}),d(ke)!=="svelte-rrjsq6"&&(ke.innerHTML=jo),kt=a(I),Ue=l(I,"P",{"data-svelte-h":!0}),d(Ue)!=="svelte-ts17wo"&&(Ue.innerHTML=Fo),Ut=a(I),Ce=l(I,"P",{"data-svelte-h":!0}),d(Ce)!=="svelte-1p5vgmd"&&(Ce.innerHTML=Jo),Ct=a(I),b(z.$$.fragment,I),Pt=a(I),le=l(I,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(le)!=="svelte-zg8xkk"&&(le.innerHTML=Zo),jt=a(I),b(D.$$.fragment,I),I.forEach(c),Ft=a(J),G=l(J,"DIV",{class:!0});var H=X(G);b(pe.$$.fragment,H),Jt=a(H),Pe=l(H,"P",{"data-svelte-h":!0}),d(Pe)!=="svelte-6rlsyn"&&(Pe.textContent=Ao),Zt=a(H),je=l(H,"P",{"data-svelte-h":!0}),d(je)!=="svelte-1im6dea"&&(je.textContent=Go),At=a(H),Fe=l(H,"P",{"data-svelte-h":!0}),d(Fe)!=="svelte-1u0ofey"&&(Fe.textContent=Wo),Gt=a(H),Je=l(H,"P",{"data-svelte-h":!0}),d(Je)!=="svelte-1p5vgmd"&&(Je.innerHTML=Bo),Wt=a(H),b(N.$$.fragment,H),H.forEach(c),J.forEach(c),ct=a(t),b(de.$$.fragment,t),mt=a(t),j=l(t,"DIV",{class:!0});var Z=X(j);b(ce.$$.fragment,Z),Bt=a(Z),Ze=l(Z,"P",{"data-svelte-h":!0}),d(Ze)!=="svelte-7joem6"&&(Ze.innerHTML=Ho),Ht=a(Z),Ae=l(Z,"P",{"data-svelte-h":!0}),d(Ae)!=="svelte-13laq45"&&(Ae.innerHTML=Ro),Rt=a(Z),Ge=l(Z,"P",{"data-svelte-h":!0}),d(Ge)!=="svelte-wz8va1"&&(Ge.textContent=So),St=a(Z),We=l(Z,"UL",{"data-svelte-h":!0}),d(We)!=="svelte-56flce"&&(We.innerHTML=Lo),Lt=a(Z),$=l(Z,"DIV",{class:!0});var k=X($);b(me.$$.fragment,k),Xt=a(k),Be=l(k,"P",{"data-svelte-h":!0}),d(Be)!=="svelte-ehfuat"&&(Be.textContent=Xo),Vt=a(k),He=l(k,"P",{"data-svelte-h":!0}),d(He)!=="svelte-3eelph"&&(He.textContent=Vo),Qt=a(k),Re=l(k,"OL",{"data-svelte-h":!0}),d(Re)!=="svelte-1pj7v4w"&&(Re.innerHTML=Qo),Yt=a(k),Se=l(k,"P",{"data-svelte-h":!0}),d(Se)!=="svelte-16i6td2"&&(Se.innerHTML=Yo),qt=a(k),Le=l(k,"P",{"data-svelte-h":!0}),d(Le)!=="svelte-1p5vgmd"&&(Le.innerHTML=qo),Et=a(k),b(K.$$.fragment,k),zt=a(k),fe=l(k,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(fe)!=="svelte-zg8xkk"&&(fe.innerHTML=Eo),Dt=a(k),b(O.$$.fragment,k),k.forEach(c),Nt=a(Z),W=l(Z,"DIV",{class:!0});var R=X(W);b(ue.$$.fragment,R),Kt=a(R),Xe=l(R,"P",{"data-svelte-h":!0}),d(Xe)!=="svelte-10ckmdb"&&(Xe.textContent=zo),Ot=a(R),Ve=l(R,"P",{"data-svelte-h":!0}),d(Ve)!=="svelte-3o5yim"&&(Ve.textContent=Do),eo=a(R),Qe=l(R,"P",{"data-svelte-h":!0}),d(Qe)!=="svelte-1u0ofey"&&(Qe.textContent=No),to=a(R),Ye=l(R,"P",{"data-svelte-h":!0}),d(Ye)!=="svelte-1p5vgmd"&&(Ye.innerHTML=Ko),oo=a(R),b(ee.$$.fragment,R),R.forEach(c),Z.forEach(c),ft=a(t),b(he.$$.fragment,t),ut=a(t),F=l(t,"DIV",{class:!0});var A=X(F);b(ge.$$.fragment,A),no=a(A),qe=l(A,"P",{"data-svelte-h":!0}),d(qe)!=="svelte-1mg1xkq"&&(qe.innerHTML=Oo),io=a(A),Ee=l(A,"P",{"data-svelte-h":!0}),d(Ee)!=="svelte-13laq45"&&(Ee.innerHTML=en),ao=a(A),ze=l(A,"P",{"data-svelte-h":!0}),d(ze)!=="svelte-wz8va1"&&(ze.textContent=tn),so=a(A),De=l(A,"UL",{"data-svelte-h":!0}),d(De)!=="svelte-56flce"&&(De.innerHTML=on),ro=a(A),T=l(A,"DIV",{class:!0});var U=X(T);b(_e.$$.fragment,U),lo=a(U),Ne=l(U,"P",{"data-svelte-h":!0}),d(Ne)!=="svelte-1fjqdar"&&(Ne.textContent=nn),po=a(U),Ke=l(U,"P",{"data-svelte-h":!0}),d(Ke)!=="svelte-3eelph"&&(Ke.textContent=an),co=a(U),Oe=l(U,"OL",{"data-svelte-h":!0}),d(Oe)!=="svelte-fu4x5s"&&(Oe.innerHTML=sn),mo=a(U),et=l(U,"P",{"data-svelte-h":!0}),d(et)!=="svelte-7tbbq8"&&(et.innerHTML=rn),fo=a(U),tt=l(U,"P",{"data-svelte-h":!0}),d(tt)!=="svelte-1p5vgmd"&&(tt.innerHTML=ln),uo=a(U),b(te.$$.fragment,U),ho=a(U),be=l(U,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(be)!=="svelte-zg8xkk"&&(be.innerHTML=pn),go=a(U),b(oe.$$.fragment,U),U.forEach(c),_o=a(A),B=l(A,"DIV",{class:!0});var S=X(B);b(ye.$$.fragment,S),bo=a(S),ot=l(S,"P",{"data-svelte-h":!0}),d(ot)!=="svelte-1ass5kx"&&(ot.textContent=dn),yo=a(S),nt=l(S,"P",{"data-svelte-h":!0}),d(nt)!=="svelte-1jvxxhk"&&(nt.textContent=cn),vo=a(S),it=l(S,"P",{"data-svelte-h":!0}),d(it)!=="svelte-16h1ngl"&&(it.textContent=mn),wo=a(S),at=l(S,"P",{"data-svelte-h":!0}),d(at)!=="svelte-1p5vgmd"&&(at.innerHTML=fn),xo=a(S),b(ne.$$.fragment,S),S.forEach(c),A.forEach(c),ht=a(t),b(ve.$$.fragment,t),gt=a(t),st=l(t,"P",{}),X(st).forEach(c),this.h()},h(){L(n,"name","hf:doc:metadata"),L(n,"content",jn),L(E,"class","tip"),L(le,"class","tip"),L(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(fe,"class","tip"),L($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(be,"class","tip"),L(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(t,m){o(document.head,n),h(t,g,m),h(t,p,m),h(t,s,m),y(f,t,m),h(t,e,m),y(u,t,m),h(t,rt,m),h(t,ie,m),h(t,lt,m),h(t,E,m),h(t,pt,m),y(ae,t,m),h(t,dt,m),h(t,P,m),y(se,P,null),o(P,yt),o(P,we),o(P,vt),o(P,xe),o(P,wt),o(P,Me),o(P,xt),o(P,$e),o(P,Mt),o(P,M),y(re,M,null),o(M,$t),o(M,Te),o(M,Tt),o(M,Ie),o(M,It),o(M,ke),o(M,kt),o(M,Ue),o(M,Ut),o(M,Ce),o(M,Ct),y(z,M,null),o(M,Pt),o(M,le),o(M,jt),y(D,M,null),o(P,Ft),o(P,G),y(pe,G,null),o(G,Jt),o(G,Pe),o(G,Zt),o(G,je),o(G,At),o(G,Fe),o(G,Gt),o(G,Je),o(G,Wt),y(N,G,null),h(t,ct,m),y(de,t,m),h(t,mt,m),h(t,j,m),y(ce,j,null),o(j,Bt),o(j,Ze),o(j,Ht),o(j,Ae),o(j,Rt),o(j,Ge),o(j,St),o(j,We),o(j,Lt),o(j,$),y(me,$,null),o($,Xt),o($,Be),o($,Vt),o($,He),o($,Qt),o($,Re),o($,Yt),o($,Se),o($,qt),o($,Le),o($,Et),y(K,$,null),o($,zt),o($,fe),o($,Dt),y(O,$,null),o(j,Nt),o(j,W),y(ue,W,null),o(W,Kt),o(W,Xe),o(W,Ot),o(W,Ve),o(W,eo),o(W,Qe),o(W,to),o(W,Ye),o(W,oo),y(ee,W,null),h(t,ft,m),y(he,t,m),h(t,ut,m),h(t,F,m),y(ge,F,null),o(F,no),o(F,qe),o(F,io),o(F,Ee),o(F,ao),o(F,ze),o(F,so),o(F,De),o(F,ro),o(F,T),y(_e,T,null),o(T,lo),o(T,Ne),o(T,po),o(T,Ke),o(T,co),o(T,Oe),o(T,mo),o(T,et),o(T,fo),o(T,tt),o(T,uo),y(te,T,null),o(T,ho),o(T,be),o(T,go),y(oe,T,null),o(F,_o),o(F,B),y(ye,B,null),o(B,bo),o(B,ot),o(B,yo),o(B,nt),o(B,vo),o(B,it),o(B,wo),o(B,at),o(B,xo),y(ne,B,null),h(t,ht,m),y(ve,t,m),h(t,gt,m),h(t,st,m),_t=!0},p(t,[m]){const J={};m&2&&(J.$$scope={dirty:m,ctx:t}),z.$set(J);const I={};m&2&&(I.$$scope={dirty:m,ctx:t}),D.$set(I);const H={};m&2&&(H.$$scope={dirty:m,ctx:t}),N.$set(H);const Z={};m&2&&(Z.$$scope={dirty:m,ctx:t}),K.$set(Z);const k={};m&2&&(k.$$scope={dirty:m,ctx:t}),O.$set(k);const R={};m&2&&(R.$$scope={dirty:m,ctx:t}),ee.$set(R);const A={};m&2&&(A.$$scope={dirty:m,ctx:t}),te.$set(A);const U={};m&2&&(U.$$scope={dirty:m,ctx:t}),oe.$set(U);const S={};m&2&&(S.$$scope={dirty:m,ctx:t}),ne.$set(S)},i(t){_t||(v(f.$$.fragment,t),v(u.$$.fragment,t),v(ae.$$.fragment,t),v(se.$$.fragment,t),v(re.$$.fragment,t),v(z.$$.fragment,t),v(D.$$.fragment,t),v(pe.$$.fragment,t),v(N.$$.fragment,t),v(de.$$.fragment,t),v(ce.$$.fragment,t),v(me.$$.fragment,t),v(K.$$.fragment,t),v(O.$$.fragment,t),v(ue.$$.fragment,t),v(ee.$$.fragment,t),v(he.$$.fragment,t),v(ge.$$.fragment,t),v(_e.$$.fragment,t),v(te.$$.fragment,t),v(oe.$$.fragment,t),v(ye.$$.fragment,t),v(ne.$$.fragment,t),v(ve.$$.fragment,t),_t=!0)},o(t){w(f.$$.fragment,t),w(u.$$.fragment,t),w(ae.$$.fragment,t),w(se.$$.fragment,t),w(re.$$.fragment,t),w(z.$$.fragment,t),w(D.$$.fragment,t),w(pe.$$.fragment,t),w(N.$$.fragment,t),w(de.$$.fragment,t),w(ce.$$.fragment,t),w(me.$$.fragment,t),w(K.$$.fragment,t),w(O.$$.fragment,t),w(ue.$$.fragment,t),w(ee.$$.fragment,t),w(he.$$.fragment,t),w(ge.$$.fragment,t),w(_e.$$.fragment,t),w(te.$$.fragment,t),w(oe.$$.fragment,t),w(ye.$$.fragment,t),w(ne.$$.fragment,t),w(ve.$$.fragment,t),_t=!1},d(t){t&&(c(g),c(p),c(s),c(e),c(rt),c(ie),c(lt),c(E),c(pt),c(dt),c(P),c(ct),c(mt),c(j),c(ft),c(ut),c(F),c(ht),c(gt),c(st)),c(n),x(f,t),x(u,t),x(ae,t),x(se),x(re),x(z),x(D),x(pe),x(N),x(de,t),x(ce),x(me),x(K),x(O),x(ue),x(ee),x(he,t),x(ge),x(_e),x(te),x(oe),x(ye),x(ne),x(ve,t)}}}const jn='{"title":"AutoPipeline","local":"autopipeline","sections":[{"title":"AutoPipelineForText2Image","local":"diffusers.AutoPipelineForText2Image","sections":[],"depth":2},{"title":"AutoPipelineForImage2Image","local":"diffusers.AutoPipelineForImage2Image","sections":[],"depth":2},{"title":"AutoPipelineForInpainting","local":"diffusers.AutoPipelineForInpainting","sections":[],"depth":2}],"depth":1}';function Fn(C){return hn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Rn extends gn{constructor(n){super(),_n(this,n,Fn,Pn,un,{})}}export{Rn as component};

Xet Storage Details

Size:
66.1 kB
·
Xet hash:
61b66095a96d0f1a77c5e33fb1025025b067aa317c7f345bcd6afbbe06ea374f

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.