Buckets:
| import{s as Ss,o as Vs,n as we}from"../chunks/scheduler.53228c21.js";import{S as Ns,i as Fs,e as s,s as n,c as h,h as Bs,a,d as r,b as i,f as T,g,j as d,k as M,l as t,m as p,n as b,t as _,o as v,p as y}from"../chunks/index.100fac89.js";import{C as Xs}from"../chunks/CopyLLMTxtMenu.b134861e.js";import{D as k}from"../chunks/Docstring.ba933fb0.js";import{C as $e}from"../chunks/CodeBlock.d30a6509.js";import{E as xe}from"../chunks/ExampleCodeBlock.c00328ba.js";import{H as en,E as Rs}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.88d816fc.js";function Es(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMCglMEElMjAlMjAlMjAlMjBTdGFibGVEaWZmdXNpb25QaXBlbGluZSUyQyUwQSUyMCUyMCUyMCUyMFN0YWJsZURpZmZ1c2lvbkltZzJJbWdQaXBlbGluZSUyQyUwQSUyMCUyMCUyMCUyMFN0YWJsZURpZmZ1c2lvbklucGFpbnRQaXBlbGluZSUyQyUwQSklMEElMEF0ZXh0MmltZyUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJzdGFibGUtZGlmZnVzaW9uLXYxLTUlMkZzdGFibGUtZGlmZnVzaW9uLXYxLTUlMjIpJTBBaW1nMmltZyUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvbkltZzJJbWdQaXBlbGluZSgqKnRleHQyaW1nLmNvbXBvbmVudHMpJTBBaW5wYWludCUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvbklucGFpbnRQaXBlbGluZSgqKnRleHQyaW1nLmNvbXBvbmVudHMp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> ( | |
| <span class="hljs-meta">... </span> StableDiffusionPipeline, | |
| <span class="hljs-meta">... </span> StableDiffusionImg2ImgPipeline, | |
| <span class="hljs-meta">... </span> StableDiffusionInpaintPipeline, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>text2img = StableDiffusionPipeline.from_pretrained(<span class="hljs-string">"stable-diffusion-v1-5/stable-diffusion-v1-5"</span>) | |
| <span class="hljs-meta">>>> </span>img2img = StableDiffusionImg2ImgPipeline(**text2img.components) | |
| <span class="hljs-meta">>>> </span>inpaint = StableDiffusionInpaintPipeline(**text2img.components)`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function zs(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zYWZldGVuc29ycyUzRFRydWUlMkMlMEEpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYSUyMHBob3RvJTIwb2YlMjBhbiUyMGFzdHJvbmF1dCUyMHJpZGluZyUyMGElMjBob3JzZSUyMG9uJTIwbWFycyUyMiUwQXBpcGUuZW5hYmxlX2F0dGVudGlvbl9zbGljaW5nKCklMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0KS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-meta">>>> </span>pipe = StableDiffusionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"stable-diffusion-v1-5/stable-diffusion-v1-5"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span> use_safetensors=<span class="hljs-literal">True</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"a photo of an astronaut riding a horse on mars"</span> | |
| <span class="hljs-meta">>>> </span>pipe.enable_attention_slicing() | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function qs(U){let l,C="Example:",w,f,x;return f=new $e({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlF3ZW4lMkZRd2VuLUltYWdlJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEElMEFwaXBlLmVuYWJsZV9ncm91cF9vZmZsb2FkKCUwQSUyMCUyMCUyMCUyMG9ubG9hZF9kZXZpY2UlM0R0b3JjaC5kZXZpY2UoJTIyY3VkYSUyMiklMkMlMEElMjAlMjAlMjAlMjBvZmZsb2FkX2RldmljZSUzRHRvcmNoLmRldmljZSglMjJjcHUlMjIpJTJDJTBBJTIwJTIwJTIwJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zdHJlYW0lM0RUcnVlJTJDJTBBKSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMjJhJTIwYmVhdXRpZnVsJTIwc3Vuc2V0JTIyKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span>pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.enable_group_offload( | |
| <span class="hljs-meta">... </span> onload_device=torch.device(<span class="hljs-string">"cuda"</span>), | |
| <span class="hljs-meta">... </span> offload_device=torch.device(<span class="hljs-string">"cpu"</span>), | |
| <span class="hljs-meta">... </span> offload_type=<span class="hljs-string">"leaf_level"</span>, | |
| <span class="hljs-meta">... </span> use_stream=<span class="hljs-literal">True</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>image = pipe(<span class="hljs-string">"a beautiful sunset"</span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-11lpom8"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function Ys(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIweGZvcm1lcnMub3BzJTIwaW1wb3J0JTIwTWVtb3J5RWZmaWNpZW50QXR0ZW50aW9uRmxhc2hBdHRlbnRpb25PcCUwQSUwQXBpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLTItMSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlJTIwJTNEJTIwcGlwZS50byglMjJjdWRhJTIyKSUwQXBpcGUuZW5hYmxlX3hmb3JtZXJzX21lbW9yeV9lZmZpY2llbnRfYXR0ZW50aW9uKGF0dGVudGlvbl9vcCUzRE1lbW9yeUVmZmljaWVudEF0dGVudGlvbkZsYXNoQXR0ZW50aW9uT3ApJTBBJTIzJTIwV29ya2Fyb3VuZCUyMGZvciUyMG5vdCUyMGFjY2VwdGluZyUyMGF0dGVudGlvbiUyMHNoYXBlJTIwdXNpbmclMjBWQUUlMjBmb3IlMjBGbGFzaCUyMEF0dGVudGlvbiUwQXBpcGUudmFlLmVuYWJsZV94Zm9ybWVyc19tZW1vcnlfZWZmaWNpZW50X2F0dGVudGlvbihhdHRlbnRpb25fb3AlM0ROb25lKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> xformers.ops <span class="hljs-keyword">import</span> MemoryEfficientAttentionFlashAttentionOp | |
| <span class="hljs-meta">>>> </span>pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"stabilityai/stable-diffusion-2-1"</span>, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Workaround for not accepting attention shape using VAE for Flash Attention</span> | |
| <span class="hljs-meta">>>> </span>pipe.vae.enable_xformers_memory_efficient_attention(attention_op=<span class="hljs-literal">None</span>)`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function As(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lJTJDJTIwU3RhYmxlRGlmZnVzaW9uU0FHUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyRnN0YWJsZS1kaWZmdXNpb24tdjEtNSUyMiklMEFuZXdfcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblNBR1BpcGVsaW5lLmZyb21fcGlwZShwaXBlKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline, StableDiffusionSAGPipeline | |
| <span class="hljs-meta">>>> </span>pipe = StableDiffusionPipeline.from_pretrained(<span class="hljs-string">"stable-diffusion-v1-5/stable-diffusion-v1-5"</span>) | |
| <span class="hljs-meta">>>> </span>new_pipe = StableDiffusionSAGPipeline.from_pipe(pipe)`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function Qs(U){let l,C="If you get the error message below, you need to finetune the weights for your downstream task:",w,f,x;return f=new $e({props:{code:"U29tZSUyMHdlaWdodHMlMjBvZiUyMFVOZXQyRENvbmRpdGlvbk1vZGVsJTIwd2VyZSUyMG5vdCUyMGluaXRpYWxpemVkJTIwZnJvbSUyMHRoZSUyMG1vZGVsJTIwY2hlY2twb2ludCUyMGF0JTIwc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIwYW5kJTIwYXJlJTIwbmV3bHklMjBpbml0aWFsaXplZCUyMGJlY2F1c2UlMjB0aGUlMjBzaGFwZXMlMjBkaWQlMjBub3QlMjBtYXRjaCUzQSUwQS0lMjBjb252X2luLndlaWdodCUzQSUyMGZvdW5kJTIwc2hhcGUlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDQlMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMGNoZWNrcG9pbnQlMjBhbmQlMjB0b3JjaC5TaXplKCU1QjMyMCUyQyUyMDklMkMlMjAzJTJDJTIwMyU1RCklMjBpbiUyMHRoZSUyMG1vZGVsJTIwaW5zdGFudGlhdGVkJTBBWW91JTIwc2hvdWxkJTIwcHJvYmFibHklMjBUUkFJTiUyMHRoaXMlMjBtb2RlbCUyMG9uJTIwYSUyMGRvd24tc3RyZWFtJTIwdGFzayUyMHRvJTIwYmUlMjBhYmxlJTIwdG8lMjB1c2UlMjBpdCUyMGZvciUyMHByZWRpY3Rpb25zJTIwYW5kJTIwaW5mZXJlbmNlLg==",highlighted:`Some weights of UNet2DConditionModel were not initialized from the model checkpoint <span class="hljs-built_in">at</span> stable-<span class="hljs-keyword">diffusion-v1-5/stable-diffusion-v1-5 </span><span class="hljs-keyword">and </span>are newly initialized <span class="hljs-keyword">because </span>the <span class="hljs-keyword">shapes </span><span class="hljs-keyword">did </span>not match: | |
| - conv_in.weight: found <span class="hljs-keyword">shape </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">4</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the checkpoint <span class="hljs-keyword">and </span>torch.Size([<span class="hljs-number">320</span>, <span class="hljs-number">9</span>, <span class="hljs-number">3</span>, <span class="hljs-number">3</span>]) in the model <span class="hljs-keyword">instantiated | |
| </span>You <span class="hljs-keyword">should </span>probably TRAIN this model on a down-stream task to <span class="hljs-keyword">be </span>able to use it for predictions <span class="hljs-keyword">and </span>inference.`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-xueb0m"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function Os(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBJTBBJTIzJTIwRG93bmxvYWQlMjBwaXBlbGluZSUyMGZyb20lMjBodWdnaW5nZmFjZS5jbyUyMGFuZCUyMGNhY2hlLiUwQXBpcGVsaW5lJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMkNvbXBWaXMlMkZsZG0tdGV4dDJpbS1sYXJnZS0yNTYlMjIpJTBBJTBBJTIzJTIwRG93bmxvYWQlMjBwaXBlbGluZSUyMHRoYXQlMjByZXF1aXJlcyUyMGFuJTIwYXV0aG9yaXphdGlvbiUyMHRva2VuJTBBJTIzJTIwRm9yJTIwbW9yZSUyMGluZm9ybWF0aW9uJTIwb24lMjBhY2Nlc3MlMjB0b2tlbnMlMkMlMjBwbGVhc2UlMjByZWZlciUyMHRvJTIwdGhpcyUyMHNlY3Rpb24lMEElMjMlMjBvZiUyMHRoZSUyMGRvY3VtZW50YXRpb24lNUQoaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRvY3MlMkZodWIlMkZzZWN1cml0eS10b2tlbnMpJTBBcGlwZWxpbmUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmxlLWRpZmZ1c2lvbi12MS01JTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyKSUwQSUwQSUyMyUyMFVzZSUyMGElMjBkaWZmZXJlbnQlMjBzY2hlZHVsZXIlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTE1TRGlzY3JldGVTY2hlZHVsZXIlMEElMEFzY2hlZHVsZXIlMjAlM0QlMjBMTVNEaXNjcmV0ZVNjaGVkdWxlci5mcm9tX2NvbmZpZyhwaXBlbGluZS5zY2hlZHVsZXIuY29uZmlnKSUwQXBpcGVsaW5lLnNjaGVkdWxlciUyMCUzRCUyMHNjaGVkdWxlcg==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Download pipeline from huggingface.co and cache.</span> | |
| <span class="hljs-meta">>>> </span>pipeline = DiffusionPipeline.from_pretrained(<span class="hljs-string">"CompVis/ldm-text2im-large-256"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Download pipeline that requires an authorization token</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># For more information on access tokens, please refer to this section</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># of the documentation](https://huggingface.co/docs/hub/security-tokens)</span> | |
| <span class="hljs-meta">>>> </span>pipeline = DiffusionPipeline.from_pretrained(<span class="hljs-string">"stable-diffusion-v1-5/stable-diffusion-v1-5"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Use a different scheduler</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LMSDiscreteScheduler | |
| <span class="hljs-meta">>>> </span>scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) | |
| <span class="hljs-meta">>>> </span>pipeline.scheduler = scheduler`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function Ks(U){let l,C="Examples:",w,f,x;return f=new $e({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFVOZXQyRENvbmRpdGlvbk1vZGVsJTBBJTBBdW5ldCUyMCUzRCUyMFVOZXQyRENvbmRpdGlvbk1vZGVsLmZyb21fcHJldHJhaW5lZCglMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24tMiUyMiUyQyUyMHN1YmZvbGRlciUzRCUyMnVuZXQlMjIpJTBBJTBBJTIzJTIwUHVzaCUyMHRoZSUyMCU2MHVuZXQlNjAlMjB0byUyMHlvdXIlMjBuYW1lc3BhY2UlMjB3aXRoJTIwdGhlJTIwbmFtZSUyMCUyMm15LWZpbmV0dW5lZC11bmV0JTIyLiUwQXVuZXQucHVzaF90b19odWIoJTIybXktZmluZXR1bmVkLXVuZXQlMjIpJTBBJTBBJTIzJTIwUHVzaCUyMHRoZSUyMCU2MHVuZXQlNjAlMjB0byUyMGFuJTIwb3JnYW5pemF0aW9uJTIwd2l0aCUyMHRoZSUyMG5hbWUlMjAlMjJteS1maW5ldHVuZWQtdW5ldCUyMi4lMEF1bmV0LnB1c2hfdG9faHViKCUyMnlvdXItb3JnJTJGbXktZmluZXR1bmVkLXVuZXQlMjIp",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> UNet2DConditionModel | |
| unet = UNet2DConditionModel.from_pretrained(<span class="hljs-string">"stabilityai/stable-diffusion-2"</span>, subfolder=<span class="hljs-string">"unet"</span>) | |
| <span class="hljs-comment"># Push the \`unet\` to your namespace with the name "my-finetuned-unet".</span> | |
| unet.push_to_hub(<span class="hljs-string">"my-finetuned-unet"</span>) | |
| <span class="hljs-comment"># Push the \`unet\` to an organization with the name "my-finetuned-unet".</span> | |
| unet.push_to_hub(<span class="hljs-string">"your-org/my-finetuned-unet"</span>)`,wrap:!1}}),{c(){l=s("p"),l.textContent=C,w=n(),h(f.$$.fragment)},l(o){l=a(o,"P",{"data-svelte-h":!0}),d(l)!=="svelte-kvfsh7"&&(l.textContent=C),w=i(o),g(f.$$.fragment,o)},m(o,$){p(o,l,$),p(o,w,$),b(f,o,$),x=!0},p:we,i(o){x||(_(f.$$.fragment,o),x=!0)},o(o){v(f.$$.fragment,o),x=!1},d(o){o&&(r(l),r(w)),y(f,o)}}}function ea(U){let l,C,w,f,x,o,$,ko,Me,ji="Pipelines provide a simple way to run state-of-the-art diffusion models in inference by bundling all of the necessary components (multiple independently-trained models, schedulers, and processors) into a single end-to-end class. Pipelines are flexible and they can be adapted to use different schedulers or even model components.",Co,Te,Ii='All pipelines are built from the base <a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a> class which provides basic functionality for loading, downloading, and saving all the components. Specific pipeline types (for example <a href="/docs/diffusers/pr_12625/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline">StableDiffusionPipeline</a>) loaded with <a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline.from_pretrained">from_pretrained()</a> are automatically detected and the pipeline components are loaded and passed to the <code>__init__</code> function of the pipeline.',Uo,K,Ji='<p>You shouldn’t use the <a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a> class for training. 
Individual components (for example, <a href="/docs/diffusers/pr_12625/en/api/models/unet2d#diffusers.UNet2DModel">UNet2DModel</a> and <a href="/docs/diffusers/pr_12625/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) of diffusion pipelines are usually trained individually, so we suggest directly working with them instead.</p> <br/> <p>Pipelines do not offer any training functionality. You’ll notice PyTorch’s autograd is disabled by decorating the <code>__call__()</code> method with a <a href="https://pytorch.org/docs/stable/generated/torch.no_grad.html" rel="nofollow"><code>torch.no_grad</code></a> decorator because pipelines should not be used for training. If you’re interested in training, please take a look at the <a href="../../training/overview">Training</a> guides instead!</p>',Po,ke,Gi="The table below lists all the pipelines currently available in 🤗 Diffusers and the tasks they support. Click on a pipeline to view its abstract and published paper.",Do,Ce,Li='<thead><tr><th>Pipeline</th> <th>Tasks</th></tr></thead> <tbody><tr><td><a href="amused">aMUSEd</a></td> <td>text2image</td></tr> <tr><td><a href="animatediff">AnimateDiff</a></td> <td>text2video</td></tr> <tr><td><a href="attend_and_excite">Attend-and-Excite</a></td> <td>text2image</td></tr> <tr><td><a href="audioldm">AudioLDM</a></td> <td>text2audio</td></tr> <tr><td><a href="audioldm2">AudioLDM2</a></td> <td>text2audio</td></tr> <tr><td><a href="aura_flow">AuraFlow</a></td> <td>text2image</td></tr> <tr><td><a href="blip_diffusion">BLIP Diffusion</a></td> <td>text2image</td></tr> <tr><td><a href="bria_3_2">Bria 3.2</a></td> <td>text2image</td></tr> <tr><td><a href="cogvideox">CogVideoX</a></td> <td>text2video</td></tr> <tr><td><a href="consistency_models">Consistency Models</a></td> <td>unconditional image generation</td></tr> <tr><td><a href="controlnet">ControlNet</a></td> <td>text2image, image2image, inpainting</td></tr> <tr><td><a 
href="controlnet_flux">ControlNet with Flux.1</a></td> <td>text2image</td></tr> <tr><td><a href="controlnet_hunyuandit">ControlNet with Hunyuan-DiT</a></td> <td>text2image</td></tr> <tr><td><a href="controlnet_sd3">ControlNet with Stable Diffusion 3</a></td> <td>text2image</td></tr> <tr><td><a href="controlnet_sdxl">ControlNet with Stable Diffusion XL</a></td> <td>text2image</td></tr> <tr><td><a href="controlnetxs">ControlNet-XS</a></td> <td>text2image</td></tr> <tr><td><a href="controlnetxs_sdxl">ControlNet-XS with Stable Diffusion XL</a></td> <td>text2image</td></tr> <tr><td><a href="dance_diffusion">Dance Diffusion</a></td> <td>unconditional audio generation</td></tr> <tr><td><a href="ddim">DDIM</a></td> <td>unconditional image generation</td></tr> <tr><td><a href="ddpm">DDPM</a></td> <td>unconditional image generation</td></tr> <tr><td><a href="deepfloyd_if">DeepFloyd IF</a></td> <td>text2image, image2image, inpainting, super-resolution</td></tr> <tr><td><a href="diffedit">DiffEdit</a></td> <td>inpainting</td></tr> <tr><td><a href="dit">DiT</a></td> <td>text2image</td></tr> <tr><td><a href="flux">Flux</a></td> <td>text2image</td></tr> <tr><td><a href="hunyuandit">Hunyuan-DiT</a></td> <td>text2image</td></tr> <tr><td><a href="i2vgenxl">I2VGen-XL</a></td> <td>image2video</td></tr> <tr><td><a href="pix2pix">InstructPix2Pix</a></td> <td>image editing</td></tr> <tr><td><a href="kandinsky">Kandinsky 2.1</a></td> <td>text2image, image2image, inpainting, interpolation</td></tr> <tr><td><a href="kandinsky_v22">Kandinsky 2.2</a></td> <td>text2image, image2image, inpainting</td></tr> <tr><td><a href="kandinsky3">Kandinsky 3</a></td> <td>text2image, image2image</td></tr> <tr><td><a href="kolors">Kolors</a></td> <td>text2image</td></tr> <tr><td><a href="latent_consistency_models">Latent Consistency Models</a></td> <td>text2image</td></tr> <tr><td><a href="latent_diffusion">Latent Diffusion</a></td> <td>text2image, super-resolution</td></tr> <tr><td><a 
href="latte">Latte</a></td> <td>text2image</td></tr> <tr><td><a href="ledits_pp">LEDITS++</a></td> <td>image editing</td></tr> <tr><td><a href="lumina">Lumina-T2X</a></td> <td>text2image</td></tr> <tr><td><a href="marigold">Marigold</a></td> <td>depth-estimation, normals-estimation, intrinsic-decomposition</td></tr> <tr><td><a href="panorama">MultiDiffusion</a></td> <td>text2image</td></tr> <tr><td><a href="musicldm">MusicLDM</a></td> <td>text2audio</td></tr> <tr><td><a href="pag">PAG</a></td> <td>text2image</td></tr> <tr><td><a href="paint_by_example">Paint by Example</a></td> <td>inpainting</td></tr> <tr><td><a href="pia">PIA</a></td> <td>image2video</td></tr> <tr><td><a href="pixart">PixArt-α</a></td> <td>text2image</td></tr> <tr><td><a href="pixart_sigma">PixArt-Σ</a></td> <td>text2image</td></tr> <tr><td><a href="self_attention_guidance">Self-Attention Guidance</a></td> <td>text2image</td></tr> <tr><td><a href="semantic_stable_diffusion">Semantic Guidance</a></td> <td>text2image</td></tr> <tr><td><a href="shap_e">Shap-E</a></td> <td>text-to-3D, image-to-3D</td></tr> <tr><td><a href="stable_audio">Stable Audio</a></td> <td>text2audio</td></tr> <tr><td><a href="stable_cascade">Stable Cascade</a></td> <td>text2image</td></tr> <tr><td><a href="stable_diffusion/overview">Stable Diffusion</a></td> <td>text2image, image2image, depth2image, inpainting, image variation, latent upscaler, super-resolution</td></tr> <tr><td><a href="stable_diffusion/stable_diffusion_xl">Stable Diffusion XL</a></td> <td>text2image, image2image, inpainting</td></tr> <tr><td><a href="stable_diffusion/sdxl_turbo">Stable Diffusion XL Turbo</a></td> <td>text2image, image2image, inpainting</td></tr> <tr><td><a href="stable_unclip">Stable unCLIP</a></td> <td>text2image, image variation</td></tr> <tr><td><a href="stable_diffusion/adapter">T2I-Adapter</a></td> <td>text2image</td></tr> <tr><td><a href="text_to_video">Text2Video</a></td> <td>text2video, video2video</td></tr> <tr><td><a 
href="text_to_video_zero">Text2Video-Zero</a></td> <td>text2video</td></tr> <tr><td><a href="unclip">unCLIP</a></td> <td>text2image, image variation</td></tr> <tr><td><a href="unidiffuser">UniDiffuser</a></td> <td>text2image, image2text, image variation, text variation, unconditional image generation, unconditional audio generation</td></tr> <tr><td><a href="value_guided_sampling">Value-guided planning</a></td> <td>value guided sampling</td></tr> <tr><td><a href="wuerstchen">Wuerstchen</a></td> <td>text2image</td></tr> <tr><td><a href="visualcloze">VisualCloze</a></td> <td>text2image, image2image, subject driven generation, inpainting, style transfer, image restoration, image editing, [depth,normal,edge,pose]2image, [depth,normal,edge,pose]-estimation, virtual try-on, image relighting</td></tr></tbody>',Zo,Ue,jo,u,Pe,tn,gt,Hi="Base class for all pipelines.",on,bt,Wi=`<a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a> stores all components (models, schedulers, and processors) for diffusion pipelines and | |
| provides methods for loading, downloading and saving models. It also includes methods to:`,nn,_t,Si="<li>move all PyTorch modules to the device of your choice</li> <li>enable/disable the progress bar for the denoising iteration</li>",sn,vt,Vi="Class attributes:",an,yt,Ni=`<li><strong>config_name</strong> (<code>str</code>) — The configuration filename that stores the class and module names of all the | |
| diffusion pipeline’s components.</li> <li><strong>_optional_components</strong> (<code>List[str]</code>) — List of all optional components that don’t have to be passed to the | |
| pipeline to function (should be overridden by subclasses).</li>`,rn,ee,De,ln,xt,Fi="Call self as a function.",dn,wt,Ze,fn,Z,je,cn,$t,Bi=`Performs Pipeline dtype and/or device conversion. A torch.dtype and torch.device are inferred from the | |
| arguments of <code>self.to(*args, **kwargs).</code>`,pn,Ie,Xi=`<p>> If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. | |
| Otherwise, > the returned pipeline is a copy of self with the desired torch.dtype and torch.device.</p>`,un,Mt,Ri="Here are the ways to call <code>to</code>:",mn,Tt,Ei=`<li><code>to(dtype, silence_dtype_warnings=False) → DiffusionPipeline</code> to return a pipeline with the specified | |
| <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype" rel="nofollow"><code>dtype</code></a></li> <li><code>to(device, silence_dtype_warnings=False) → DiffusionPipeline</code> to return a pipeline with the specified | |
| <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.device" rel="nofollow"><code>device</code></a></li> <li><code>to(device=None, dtype=None, silence_dtype_warnings=False) → DiffusionPipeline</code> to return a pipeline with the | |
| specified <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.device" rel="nofollow"><code>device</code></a> and | |
| <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype" rel="nofollow"><code>dtype</code></a></li>`,hn,G,Je,gn,kt,zi=`The <code>self.components</code> property can be useful to run different pipelines with the same weights and | |
| configurations without reallocating additional memory.`,bn,Ct,qi=`Returns (<code>dict</code>): | |
| A dictionary containing all the modules needed to initialize the pipeline.`,_n,te,vn,oe,Ge,yn,Ut,Yi=`Disable sliced attention computation. If <code>enable_attention_slicing</code> was previously called, attention is | |
| computed in one step.`,xn,ne,Le,wn,Pt,Ai='Disable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>.',$n,q,He,Mn,Dt,Qi="Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.",Tn,We,Oi='<p>> To use private or <a href="https://huggingface.co/docs/hub/models-gated#gated-models" rel="nofollow">gated models</a>, log-in\nwith `hf > auth login</p>',kn,L,Se,Cn,Zt,Ki=`Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor | |
| in slices to compute attention in several steps. For more than one attention head, the computation is performed | |
| sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.`,Un,Ve,es=`<p>> ⚠️ Don’t enable attention slicing if you’re already using <code>scaled_dot_product_attention</code> (SDPA) | |
| from PyTorch > 2.0 or xFormers. These attention computations are already very memory efficient so you won’t | |
| need to enable > this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious | |
| slow downs!</p>`,Pn,ie,Dn,P,Ne,Zn,jt,ts=`Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, | |
| and where it is beneficial, we need to first provide some context on how other supported offloading methods | |
| work.`,jn,It,os="Typically, offloading is done at two levels:",In,Jt,ns=`<li>Module-level: In Diffusers, this can be enabled using the <code>ModelMixin::enable_model_cpu_offload()</code> method. It | |
| works by offloading each component of a pipeline to the CPU for storage, and onloading to the accelerator | |
| device when needed for computation. This method is more memory-efficient than keeping all components on the | |
| accelerator, but the memory requirements are still quite high. For this method to work, one needs memory | |
| equivalent to size of the model in runtime dtype + size of largest intermediate activation tensors to be able | |
| to complete the forward pass.</li> <li>Leaf-level: In Diffusers, this can be enabled using the <code>ModelMixin::enable_sequential_cpu_offload()</code> method. | |
| It | |
| works by offloading the lowest leaf-level parameters of the computation graph to the CPU for storage, and | |
| onloading only the leafs to the accelerator device for computation. This uses the lowest amount of accelerator | |
| memory, but can be slower due to the excessive number of device synchronizations.</li>`,Jn,Gt,is=`Group offloading is a middle ground between the two methods. It works by offloading groups of internal layers, | |
| (either <code>torch.nn.ModuleList</code> or <code>torch.nn.Sequential</code>). This method uses lower memory than module-level | |
| offloading. It is also faster than leaf-level/sequential offloading, as the number of device synchronizations | |
| is reduced.`,Gn,Lt,ss=`Another supported feature (for CUDA devices with support for asynchronous data transfer streams) is the ability | |
| to overlap data transfer and computation to reduce the overall execution time compared to sequential | |
| offloading. This is enabled using layer prefetching with streams, i.e., the layer that is to be executed next | |
| starts onloading to the accelerator device while the current layer is being executed - this increases the | |
| memory requirements slightly. Note that this implementation also supports leaf-level offloading but can be made | |
| much faster when using streams.`,Ln,se,Hn,ae,Fe,Wn,Ht,as=`Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared | |
| to <code>enable_sequential_cpu_offload</code>, this method moves one whole model at a time to the accelerator when its | |
| <code>forward</code> method is called, and the model remains in accelerator until the next model runs. Memory savings are | |
| lower than with <code>enable_sequential_cpu_offload</code>, but performance is much better due to the iterative execution | |
| of the <code>unet</code>.`,Sn,re,Be,Vn,Wt,rs=`Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state | |
| dicts of all <code>torch.nn.Module</code> components (except those in <code>self._exclude_from_cpu_offload</code>) are saved to CPU | |
| and then moved to <code>torch.device('meta')</code> and loaded to accelerator only when their specific submodule has its | |
| <code>forward</code> method called. Offloading happens on a submodule basis. Memory savings are higher than with | |
| <code>enable_model_cpu_offload</code>, but performance is lower.`,Nn,H,Xe,Fn,St,ls=`Enable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>. When this | |
| option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed | |
| up during training is not guaranteed.`,Bn,Re,ds=`<p>> ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient | |
| attention takes > precedent.</p>`,Xn,le,Rn,Y,Ee,En,Vt,fs=`Create a new pipeline from a given pipeline. This method is useful to create a new pipeline from the existing | |
| pipeline components without reallocating additional memory.`,zn,de,qn,D,ze,Yn,Nt,cs="Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.",An,Ft,ps="The pipeline is set in evaluation mode (<code>model.eval()</code>) by default.",Qn,fe,On,qe,us=`<p>> To use private or <a href="https://huggingface.co/docs/hub/models-gated#gated-models" rel="nofollow">gated</a> models, log-in | |
| with <code>hf > auth login</code>.</p>`,Kn,ce,ei,W,Ye,ti,Bt,ms="Method that performs the following:",oi,Xt,hs=`<li>Offloads all components.</li> <li>Removes all model hooks that were added when using <code>enable_model_cpu_offload</code>, and then applies them again. | |
| In case the model has not been offloaded, this function is a no-op.</li> <li>Resets stateful diffusers hooks of denoiser components if they were added with | |
| <code>register_hook()</code>.</li>`,ni,Rt,gs=`Make sure to add this function to the end of the <code>__call__</code> function of your pipeline so that it functions | |
| correctly when applying <code>enable_model_cpu_offload</code>.`,ii,pe,Ae,si,Et,bs="Convert a NumPy image or a batch of images to a PIL image.",ai,ue,Qe,ri,zt,_s="Removes all hooks that were added when using <code>enable_sequential_cpu_offload</code> or <code>enable_model_cpu_offload</code>.",li,me,Oe,di,qt,vs="Resets the device maps (if any) to None.",fi,he,Ke,ci,Yt,ys=`Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its | |
| class implements both a save and loading method. The pipeline is easily reloaded using the | |
| <a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline.from_pretrained">from_pretrained()</a> class method.`,Io,I,et,pi,At,xs='Enables the FreeU mechanism as in <a href="https://huggingface.co/papers/2309.11497" rel="nofollow">https://huggingface.co/papers/2309.11497</a>.',ui,Qt,ws="The suffixes after the scaling factors represent the stages where they are being applied.",mi,Ot,$s=`Please refer to the <a href="https://github.com/ChenyangSi/FreeU" rel="nofollow">official repository</a> for combinations of the values | |
| that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.`,Jo,Q,tt,hi,Kt,Ms="Disables the FreeU mechanism if enabled.",Go,ot,Lo,V,nt,gi,eo,Ts="A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub.",bi,A,it,_i,to,ks="Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub.",vi,ge,Ho,st,Wo,N,at,yi,oo,Cs=`Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing | |
| custom callbacks and ensures that all callbacks have a consistent interface.`,xi,no,Us=`Please implement the following: | |
| <code>tensor_inputs</code>: This should return a list of tensor inputs specific to your callback. You will only be able to | |
| include | |
| variables listed in the <code>._callback_tensor_inputs</code> attribute of your pipeline class. | |
| <code>callback_fn</code>: This method defines the core functionality of your callback.`,So,F,rt,wi,io,Ps=`Callback function for Stable Diffusion Pipelines. After certain number of steps (set by <code>cutoff_step_ratio</code> or | |
| <code>cutoff_step_index</code>), this callback will disable the CFG.`,$i,so,Ds="Note: This callback mutates the pipeline by changing the <code>_guidance_scale</code> attribute to 0.0 after the cutoff step.",Vo,B,lt,Mi,ao,Zs=`Callback function for the base Stable Diffusion XL Pipelines. After certain number of steps (set by | |
| <code>cutoff_step_ratio</code> or <code>cutoff_step_index</code>), this callback will disable the CFG.`,Ti,ro,js="Note: This callback mutates the pipeline by changing the <code>_guidance_scale</code> attribute to 0.0 after the cutoff step.",No,X,dt,ki,lo,Is=`Callback function for the Controlnet Stable Diffusion XL Pipelines. After certain number of steps (set by | |
| <code>cutoff_step_ratio</code> or <code>cutoff_step_index</code>), this callback will disable the CFG.`,Ci,fo,Js="Note: This callback mutates the pipeline by changing the <code>_guidance_scale</code> attribute to 0.0 after the cutoff step.",Fo,R,ft,Ui,co,Gs=`Callback function for any pipeline that inherits <code>IPAdapterMixin</code>. After certain number of steps (set by | |
| <code>cutoff_step_ratio</code> or <code>cutoff_step_index</code>), this callback will set the IP Adapter scale to <code>0.0</code>.`,Pi,po,Ls="Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step.",Bo,E,ct,Di,uo,Hs=`Callback function for Stable Diffusion 3 Pipelines. After certain number of steps (set by <code>cutoff_step_ratio</code> or | |
| <code>cutoff_step_index</code>), this callback will disable the CFG.`,Zi,mo,Ws="Note: This callback mutates the pipeline by changing the <code>_guidance_scale</code> attribute to 0.0 after the cutoff step.",Xo,pt,Ro,Mo,Eo;return x=new Xs({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),$=new en({props:{title:"Pipelines",local:"pipelines",headingTag:"h1"}}),Ue=new en({props:{title:"DiffusionPipeline",local:"diffusers.DiffusionPipeline",headingTag:"h2"}}),Pe=new k({props:{name:"class diffusers.DiffusionPipeline",anchor:"diffusers.DiffusionPipeline",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L182"}}),De=new k({props:{name:"__call__",anchor:"diffusers.DiffusionPipeline.__call__",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}]}}),Ze=new k({props:{name:"device",anchor:"diffusers.DiffusionPipeline.device",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L563",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>The torch device on which the pipeline is located.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>torch.device</code></p> | |
| `}}),je=new k({props:{name:"to",anchor:"diffusers.DiffusionPipeline.to",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.to.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>) — | |
| Returns a pipeline with the specified | |
| <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype" rel="nofollow"><code>dtype</code></a>`,name:"dtype"},{anchor:"diffusers.DiffusionPipeline.to.device",description:`<strong>device</strong> (<code>torch.Device</code>, <em>optional</em>) — | |
| Returns a pipeline with the specified | |
| <a href="https://pytorch.org/docs/stable/tensor_attributes.html#torch.device" rel="nofollow"><code>device</code></a>`,name:"device"},{anchor:"diffusers.DiffusionPipeline.to.silence_dtype_warnings",description:`<strong>silence_dtype_warnings</strong> (<code>str</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to omit warnings if the target <code>dtype</code> is not compatible with the target <code>device</code>.`,name:"silence_dtype_warnings"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L371",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>The pipeline converted to specified <code>dtype</code> and/or <code>dtype</code>.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline" | |
| >DiffusionPipeline</a></p> | |
| `}}),Je=new k({props:{name:"components",anchor:"diffusers.DiffusionPipeline.components",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1862"}}),te=new xe({props:{anchor:"diffusers.DiffusionPipeline.components.example",$$slots:{default:[Es]},$$scope:{ctx:U}}}),Ge=new k({props:{name:"disable_attention_slicing",anchor:"diffusers.DiffusionPipeline.disable_attention_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L2024"}}),Le=new k({props:{name:"disable_xformers_memory_efficient_attention",anchor:"diffusers.DiffusionPipeline.disable_xformers_memory_efficient_attention",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1961"}}),He=new k({props:{name:"download",anchor:"diffusers.DiffusionPipeline.download",parameters:[{name:"pretrained_model_name",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.download.pretrained_model_name",description:`<strong>pretrained_model_name</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) — | |
| A string, the <em>repository id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained pipeline | |
| hosted on the Hub.`,name:"pretrained_model_name"},{anchor:"diffusers.DiffusionPipeline.download.custom_pipeline",description:`<strong>custom_pipeline</strong> (<code>str</code>, <em>optional</em>) — | |
| Can be either:</p> | |
| <ul> | |
| <li> | |
| <p>A string, the <em>repository id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained | |
| pipeline hosted on the Hub. The repository must contain a file called <code>pipeline.py</code> that defines | |
| the custom pipeline.</p> | |
| </li> | |
| <li> | |
| <p>A string, the <em>file name</em> of a community pipeline hosted on GitHub under | |
| <a href="https://github.com/huggingface/diffusers/tree/main/examples/community" rel="nofollow">Community</a>. Valid file | |
| names must match the file name and not the pipeline script (<code>clip_guided_stable_diffusion</code> | |
| instead of <code>clip_guided_stable_diffusion.py</code>). Community pipelines are always loaded from the | |
| current <code>main</code> branch of GitHub.</p> | |
| </li> | |
| <li> | |
| <p>A path to a <em>directory</em> (<code>./my_pipeline_directory/</code>) containing a custom pipeline. The directory | |
| must contain a file called <code>pipeline.py</code> that defines the custom pipeline.</p> | |
| </li> | |
| </ul> | |
| <blockquote class="warning"> | |
| <p>> 🧪 This is an experimental feature and may change in the future.</p> | |
| </blockquote> | |
| <p>For more information on how to load and create custom pipelines, take a look at <a href="https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline" rel="nofollow">How to contribute a | |
| community pipeline</a>.`,name:"custom_pipeline"},{anchor:"diffusers.DiffusionPipeline.download.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.DiffusionPipeline.download.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.DiffusionPipeline.download.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"diffusers.DiffusionPipeline.download.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.DiffusionPipeline.download.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.DiffusionPipeline.download.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.DiffusionPipeline.download.custom_revision",description:`<strong>custom_revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, or a commit id similar to | |
| <code>revision</code> when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a | |
| custom pipeline from GitHub, otherwise it defaults to <code>"main"</code> when loading from the Hub.`,name:"custom_revision"},{anchor:"diffusers.DiffusionPipeline.download.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) — | |
| Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not | |
| guarantee the timeliness or safety of the source, and you should refer to the mirror site for more | |
| information.`,name:"mirror"},{anchor:"diffusers.DiffusionPipeline.download.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) — | |
| Load weights from a specified variant filename such as <code>"fp16"</code> or <code>"ema"</code>. This is ignored when | |
| loading <code>from_flax</code>.`,name:"variant"},{anchor:"diffusers.DiffusionPipeline.download.dduf_file(str,",description:`<strong>dduf_file(<code>str</code>,</strong> <em>optional</em>) — | |
| Load weights from the specified DDUF file.`,name:"dduf_file(str,"},{anchor:"diffusers.DiffusionPipeline.download.use_safetensors",description:`<strong>use_safetensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| If set to <code>None</code>, the safetensors weights are downloaded if they’re available <strong>and</strong> if the | |
| safetensors library is installed. If set to <code>True</code>, the model is forcibly loaded from safetensors | |
| weights. If set to <code>False</code>, safetensors weights are not loaded.`,name:"use_safetensors"},{anchor:"diffusers.DiffusionPipeline.download.use_onnx",description:`<strong>use_onnx</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| If set to <code>True</code>, ONNX weights will always be downloaded if present. If set to <code>False</code>, ONNX weights | |
| will never be downloaded. By default <code>use_onnx</code> defaults to the <code>_is_onnx</code> class attribute which is | |
| <code>False</code> for non-ONNX pipelines and <code>True</code> for ONNX pipelines. ONNX weights include both files ending | |
| with <code>.onnx</code> and <code>.pb</code>.`,name:"use_onnx"},{anchor:"diffusers.DiffusionPipeline.download.trust_remote_code",description:`<strong>trust_remote_code</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to allow for custom pipelines and components defined on the Hub in their own files. This | |
| option should only be set to <code>True</code> for repositories you trust and in which you have read the code, as | |
| it will execute code present on the Hub on your local machine.`,name:"trust_remote_code"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1478",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>A path to the downloaded pipeline.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>os.PathLike</code></p> | |
| `}}),Se=new k({props:{name:"enable_attention_slicing",anchor:"diffusers.DiffusionPipeline.enable_attention_slicing",parameters:[{name:"slice_size",val:": typing.Union[int, str, NoneType] = 'auto'"}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.enable_attention_slicing.slice_size",description:`<strong>slice_size</strong> (<code>str</code> or <code>int</code>, <em>optional</em>, defaults to <code>"auto"</code>) — | |
| When <code>"auto"</code>, halves the input to the attention heads, so attention will be computed in two steps. If | |
| <code>"max"</code>, maximum amount of memory will be saved by running only one slice at a time. If a number is | |
| provided, uses as many slices as <code>attention_head_dim // slice_size</code>. In this case, <code>attention_head_dim</code> | |
| must be a multiple of <code>slice_size</code>.`,name:"slice_size"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1987"}}),ie=new xe({props:{anchor:"diffusers.DiffusionPipeline.enable_attention_slicing.example",$$slots:{default:[zs]},$$scope:{ctx:U}}}),Ne=new k({props:{name:"enable_group_offload",anchor:"diffusers.DiffusionPipeline.enable_group_offload",parameters:[{name:"onload_device",val:": device"},{name:"offload_device",val:": device = device(type='cpu')"},{name:"offload_type",val:": str = 'block_level'"},{name:"num_blocks_per_group",val:": typing.Optional[int] = None"},{name:"non_blocking",val:": bool = False"},{name:"use_stream",val:": bool = False"},{name:"record_stream",val:": bool = False"},{name:"low_cpu_mem_usage",val:" = False"},{name:"offload_to_disk_path",val:": typing.Optional[str] = None"},{name:"exclude_modules",val:": typing.Union[str, typing.List[str], NoneType] = None"}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.enable_group_offload.onload_device",description:`<strong>onload_device</strong> (<code>torch.device</code>) — | |
| The device to which the group of modules are onloaded.`,name:"onload_device"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.offload_device",description:`<strong>offload_device</strong> (<code>torch.device</code>, defaults to <code>torch.device("cpu")</code>) — | |
| The device to which the group of modules are offloaded. This should typically be the CPU. Default is | |
| CPU.`,name:"offload_device"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.offload_type",description:`<strong>offload_type</strong> (<code>str</code> or <code>GroupOffloadingType</code>, defaults to “block_level”) — | |
| The type of offloading to be applied. Can be one of “block_level” or “leaf_level”. Default is | |
| “block_level”.`,name:"offload_type"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.offload_to_disk_path",description:`<strong>offload_to_disk_path</strong> (<code>str</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The path to the directory where parameters will be offloaded. Setting this option can be useful in | |
| limited RAM environment settings where a reasonable speed-memory trade-off is desired.`,name:"offload_to_disk_path"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.num_blocks_per_group",description:`<strong>num_blocks_per_group</strong> (<code>int</code>, <em>optional</em>) — | |
| The number of blocks per group when using offload_type=“block_level”. This is required when using | |
| offload_type=“block_level”.`,name:"num_blocks_per_group"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.non_blocking",description:`<strong>non_blocking</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If True, offloading and onloading is done with non-blocking data transfer.`,name:"non_blocking"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.use_stream",description:`<strong>use_stream</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If True, offloading and onloading is done asynchronously using a CUDA stream. This can be useful for | |
| overlapping computation and data transfer.`,name:"use_stream"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.record_stream",description:`<strong>record_stream</strong> (<code>bool</code>, defaults to <code>False</code>) — When enabled with <code>use_stream</code>, it marks the current tensor | |
| as having been used by this stream. It is faster at the expense of slightly more memory usage. Refer to | |
| the <a href="https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html" rel="nofollow">PyTorch official docs</a> | |
| more details.`,name:"record_stream"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If True, the CPU memory usage is minimized by pinning tensors on-the-fly instead of pre-pinning them. | |
| This option only matters when using streamed CPU offloading (i.e. <code>use_stream=True</code>). This can be | |
| useful when the CPU memory is a bottleneck but may counteract the benefits of using streams.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.DiffusionPipeline.enable_group_offload.exclude_modules",description:"<strong>exclude_modules</strong> (<code>Union[str, List[str]]</code>, defaults to <code>None</code>) — List of modules to exclude from offloading.",name:"exclude_modules"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1338"}}),se=new xe({props:{anchor:"diffusers.DiffusionPipeline.enable_group_offload.example",$$slots:{default:[qs]},$$scope:{ctx:U}}}),Fe=new k({props:{name:"enable_model_cpu_offload",anchor:"diffusers.DiffusionPipeline.enable_model_cpu_offload",parameters:[{name:"gpu_id",val:": typing.Optional[int] = None"},{name:"device",val:": typing.Union[torch.device, str] = None"}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.enable_model_cpu_offload.gpu_id",description:`<strong>gpu_id</strong> (<code>int</code>, <em>optional</em>) — | |
| The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.`,name:"gpu_id"},{anchor:"diffusers.DiffusionPipeline.enable_model_cpu_offload.device",description:`<strong>device</strong> (<code>torch.Device</code> or <code>str</code>, <em>optional</em>, defaults to None) — | |
| The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will | |
| automatically detect the available accelerator and use.`,name:"device"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1155"}}),Be=new k({props:{name:"enable_sequential_cpu_offload",anchor:"diffusers.DiffusionPipeline.enable_sequential_cpu_offload",parameters:[{name:"gpu_id",val:": typing.Optional[int] = None"},{name:"device",val:": typing.Union[torch.device, str] = None"}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.enable_sequential_cpu_offload.gpu_id",description:`<strong>gpu_id</strong> (<code>int</code>, <em>optional</em>) — | |
| The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.`,name:"gpu_id"},{anchor:"diffusers.DiffusionPipeline.enable_sequential_cpu_offload.device",description:`<strong>device</strong> (<code>torch.Device</code> or <code>str</code>, <em>optional</em>, defaults to None) — | |
| The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will | |
| automatically detect the available accelerator and use.`,name:"device"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1271"}}),Xe=new k({props:{name:"enable_xformers_memory_efficient_attention",anchor:"diffusers.DiffusionPipeline.enable_xformers_memory_efficient_attention",parameters:[{name:"attention_op",val:": typing.Optional[typing.Callable] = None"}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.enable_xformers_memory_efficient_attention.attention_op",description:`<strong>attention_op</strong> (<code>Callable</code>, <em>optional</em>) — | |
| Override the default <code>None</code> operator for use as <code>op</code> argument to the | |
| <a href="https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention" rel="nofollow"><code>memory_efficient_attention()</code></a> | |
| function of xFormers.`,name:"attention_op"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1930"}}),le=new xe({props:{anchor:"diffusers.DiffusionPipeline.enable_xformers_memory_efficient_attention.example",$$slots:{default:[Ys]},$$scope:{ctx:U}}}),Ee=new k({props:{name:"from_pipe",anchor:"diffusers.DiffusionPipeline.from_pipe",parameters:[{name:"pipeline",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.from_pipe.pipeline",description:`<strong>pipeline</strong> (<code>DiffusionPipeline</code>) — | |
| The pipeline from which to create a new pipeline.`,name:"pipeline"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L2040",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>A new pipeline with the same weights and configurations as <code>pipeline</code>.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>DiffusionPipeline</code></p> | |
| `}}),de=new xe({props:{anchor:"diffusers.DiffusionPipeline.from_pipe.example",$$slots:{default:[As]},$$scope:{ctx:U}}}),ze=new k({props:{name:"from_pretrained",anchor:"diffusers.DiffusionPipeline.from_pretrained",parameters:[{name:"pretrained_model_name_or_path",val:": typing.Union[str, os.PathLike, NoneType]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path",description:`<strong>pretrained_model_name_or_path</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>repo id</em> (for example <code>CompVis/ldm-text2im-large-256</code>) of a pretrained pipeline | |
| hosted on the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_pipeline_directory/</code>) containing pipeline weights | |
| saved using | |
| <a href="/docs/diffusers/pr_12625/en/api/pipelines/overview#diffusers.DiffusionPipeline.save_pretrained">save_pretrained()</a>.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_pipeline_directory/</code>) containing a dduf file</li> | |
| </ul>`,name:"pretrained_model_name_or_path"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.torch_dtype",description:`<strong>torch_dtype</strong> (<code>torch.dtype</code> or <code>dict[str, Union[str, torch.dtype]]</code>, <em>optional</em>) — | |
| Override the default <code>torch.dtype</code> and load the model with another dtype. To load submodels with | |
| different dtype pass a <code>dict</code> (for example <code>{'transformer': torch.bfloat16, 'vae': torch.float16}</code>). | |
| Set the default dtype for unspecified components with <code>default</code> (for example <code>{'transformer': torch.bfloat16, 'default': torch.float16}</code>). If a component is not specified and no default is set, | |
| <code>torch.float32</code> is used.`,name:"torch_dtype"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.custom_pipeline",description:`<strong>custom_pipeline</strong> (<code>str</code>, <em>optional</em>) —</p> | |
| <blockquote class="warning"> | |
| <p>> 🧪 This is an experimental feature and may change in the future.</p> | |
| </blockquote> | |
| <p>Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>repo id</em> (for example <code>hf-internal-testing/diffusers-dummy-pipeline</code>) of a custom | |
| pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines | |
| the custom pipeline.</li> | |
| <li>A string, the <em>file name</em> of a community pipeline hosted on GitHub under | |
| <a href="https://github.com/huggingface/diffusers/tree/main/examples/community" rel="nofollow">Community</a>. Valid file | |
| names must match the file name and not the pipeline script (<code>clip_guided_stable_diffusion</code> | |
| instead of <code>clip_guided_stable_diffusion.py</code>). Community pipelines are always loaded from the | |
| current main branch of GitHub.</li> | |
| <li>A path to a directory (<code>./my_pipeline_directory/</code>) containing a custom pipeline. The directory | |
| must contain a file called <code>pipeline.py</code> that defines the custom pipeline.</li> | |
| </ul> | |
| <p>For more information on how to load and create custom pipelines, please have a look at <a href="https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview" rel="nofollow">Loading and | |
| Adding Custom | |
| Pipelines</a>`,name:"custom_pipeline"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.output_loading_info(bool,",description:`<strong>output_loading_info(<code>bool</code>,</strong> <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.`,name:"output_loading_info(bool,"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.custom_revision",description:`<strong>custom_revision</strong> (<code>str</code>, <em>optional</em>) — | |
| The specific model version to use. It can be a branch name, a tag name, or a commit id similar to | |
| <code>revision</code> when loading a custom pipeline from the Hub. Defaults to the latest stable 🤗 Diffusers | |
| version.`,name:"custom_revision"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.mirror",description:`<strong>mirror</strong> (<code>str</code>, <em>optional</em>) — | |
| Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not | |
| guarantee the timeliness or safety of the source, and you should refer to the mirror site for more | |
| information.`,name:"mirror"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.device_map",description:`<strong>device_map</strong> (<code>str</code>, <em>optional</em>) — | |
| Strategy that dictates how the different components of a pipeline should be placed on available | |
| devices. Currently, only “balanced” <code>device_map</code> is supported. Check out | |
| <a href="https://huggingface.co/docs/diffusers/main/en/tutorials/inference_with_big_models#device-placement" rel="nofollow">this</a> | |
| to know more.`,name:"device_map"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.max_memory",description:`<strong>max_memory</strong> (<code>Dict</code>, <em>optional</em>) — | |
| A dictionary device identifier for the maximum memory. Will default to the maximum memory available for | |
| each GPU and the available CPU RAM if unset.`,name:"max_memory"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.offload_folder",description:`<strong>offload_folder</strong> (<code>str</code> or <code>os.PathLike</code>, <em>optional</em>) — | |
| The path to offload weights if device_map contains the value <code>"disk"</code>.`,name:"offload_folder"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.offload_state_dict",description:`<strong>offload_state_dict</strong> (<code>bool</code>, <em>optional</em>) — | |
| If <code>True</code>, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if | |
| the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to <code>True</code> | |
| when there is some disk offload.`,name:"offload_state_dict"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version >= 1.9.0 else <code>False</code>) — | |
| Speed up model loading only loading the pretrained weights and not initializing the weights. This also | |
| tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. | |
| Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this | |
| argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.use_safetensors",description:`<strong>use_safetensors</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| If set to <code>None</code>, the safetensors weights are downloaded if they’re available <strong>and</strong> if the | |
| safetensors library is installed. If set to <code>True</code>, the model is forcibly loaded from safetensors | |
| weights. If set to <code>False</code>, safetensors weights are not loaded.`,name:"use_safetensors"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.use_onnx",description:`<strong>use_onnx</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| If set to <code>True</code>, ONNX weights will always be downloaded if present. If set to <code>False</code>, ONNX weights | |
| will never be downloaded. By default <code>use_onnx</code> defaults to the <code>_is_onnx</code> class attribute which is | |
| <code>False</code> for non-ONNX pipelines and <code>True</code> for ONNX pipelines. ONNX weights include both files ending | |
| with <code>.onnx</code> and <code>.pb</code>.`,name:"use_onnx"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.kwargs",description:`<strong>kwargs</strong> (remaining dictionary of keyword arguments, <em>optional</em>) — | |
| Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline | |
| class). The overwritten components are passed directly to the pipelines <code>__init__</code> method. See example | |
| below for more information.`,name:"kwargs"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) — | |
| Load weights from a specified variant filename such as <code>"fp16"</code> or <code>"ema"</code>. This is ignored when | |
| loading <code>from_flax</code>.`,name:"variant"},{anchor:"diffusers.DiffusionPipeline.from_pretrained.dduf_file(str,",description:`<strong>dduf_file(<code>str</code>,</strong> <em>optional</em>) — | |
| Load weights from the specified dduf file.`,name:"dduf_file(str,"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L593"}}),fe=new xe({props:{anchor:"diffusers.DiffusionPipeline.from_pretrained.example",$$slots:{default:[Qs]},$$scope:{ctx:U}}}),ce=new xe({props:{anchor:"diffusers.DiffusionPipeline.from_pretrained.example-2",$$slots:{default:[Os]},$$scope:{ctx:U}}}),Ye=new k({props:{name:"maybe_free_model_hooks",anchor:"diffusers.DiffusionPipeline.maybe_free_model_hooks",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1248"}}),Ae=new k({props:{name:"numpy_to_pil",anchor:"diffusers.DiffusionPipeline.numpy_to_pil",parameters:[{name:"images",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1900"}}),Qe=new k({props:{name:"remove_all_hooks",anchor:"diffusers.DiffusionPipeline.remove_all_hooks",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1146"}}),Oe=new k({props:{name:"reset_device_map",anchor:"diffusers.DiffusionPipeline.reset_device_map",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L1465"}}),Ke=new k({props:{name:"save_pretrained",anchor:"diffusers.DiffusionPipeline.save_pretrained",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"safe_serialization",val:": bool = True"},{name:"variant",val:": typing.Optional[str] = None"},{name:"max_shard_size",val:": typing.Union[int, str, NoneType] = None"},{name:"push_to_hub",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusionPipeline.save_pretrained.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save a pipeline to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.DiffusionPipeline.save_pretrained.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"},{anchor:"diffusers.DiffusionPipeline.save_pretrained.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) — | |
| If specified, weights are saved in the format <code>pytorch_model.<variant>.bin</code>.`,name:"variant"},{anchor:"diffusers.DiffusionPipeline.save_pretrained.max_shard_size",description:`<strong>max_shard_size</strong> (<code>int</code> or <code>str</code>, defaults to <code>None</code>) — | |
| The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size | |
| lower than this size. If expressed as a string, needs to be digits followed by a unit (like <code>"5GB"</code>). | |
| If expressed as an integer, the unit is bytes. Note that this limit will be decreased after a certain | |
| period of time (starting from Oct 2024) to allow users to upgrade to the latest version of <code>diffusers</code>. | |
| This is to establish a common default size for this argument across different libraries in the Hugging | |
| Face ecosystem (<code>transformers</code>, and <code>accelerate</code>, for example).`,name:"max_shard_size"},{anchor:"diffusers.DiffusionPipeline.save_pretrained.push_to_hub",description:`<strong>push_to_hub</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the | |
| repository you want to push to with <code>repo_id</code> (will default to the name of <code>save_directory</code> in your | |
| namespace).`,name:"push_to_hub"},{anchor:"diffusers.DiffusionPipeline.save_pretrained.kwargs",description:`<strong>kwargs</strong> (<code>Dict[str, Any]</code>, <em>optional</em>) — | |
| Additional keyword arguments passed along to the <a href="/docs/diffusers/pr_12625/en/api/schedulers/overview#diffusers.utils.PushToHubMixin.push_to_hub">push_to_hub()</a> method.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L238"}}),et=new k({props:{name:"diffusers.StableDiffusionMixin.enable_freeu",anchor:"diffusers.StableDiffusionMixin.enable_freeu",parameters:[{name:"s1",val:": float"},{name:"s2",val:": float"},{name:"b1",val:": float"},{name:"b2",val:": float"}],parametersDescription:[{anchor:"diffusers.StableDiffusionMixin.enable_freeu.s1",description:`<strong>s1</strong> (<code>float</code>) — | |
| Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to | |
| mitigate “oversmoothing effect” in the enhanced denoising process.`,name:"s1"},{anchor:"diffusers.StableDiffusionMixin.enable_freeu.s2",description:`<strong>s2</strong> (<code>float</code>) — | |
| Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to | |
| mitigate “oversmoothing effect” in the enhanced denoising process.`,name:"s2"},{anchor:"diffusers.StableDiffusionMixin.enable_freeu.b1",description:"<strong>b1</strong> (<code>float</code>) — Scaling factor for stage 1 to amplify the contributions of backbone features.",name:"b1"},{anchor:"diffusers.StableDiffusionMixin.enable_freeu.b2",description:"<strong>b2</strong> (<code>float</code>) — Scaling factor for stage 2 to amplify the contributions of backbone features.",name:"b2"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L2242"}}),tt=new k({props:{name:"diffusers.StableDiffusionMixin.disable_freeu",anchor:"diffusers.StableDiffusionMixin.disable_freeu",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/pipelines/pipeline_utils.py#L2264"}}),ot=new en({props:{title:"PushToHubMixin",local:"diffusers.utils.PushToHubMixin",headingTag:"h2"}}),nt=new k({props:{name:"class diffusers.utils.PushToHubMixin",anchor:"diffusers.utils.PushToHubMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/utils/hub_utils.py#L465"}}),it=new k({props:{name:"push_to_hub",anchor:"diffusers.utils.PushToHubMixin.push_to_hub",parameters:[{name:"repo_id",val:": str"},{name:"commit_message",val:": typing.Optional[str] = None"},{name:"private",val:": typing.Optional[bool] = None"},{name:"token",val:": typing.Optional[str] = None"},{name:"create_pr",val:": bool = False"},{name:"safe_serialization",val:": bool = True"},{name:"variant",val:": typing.Optional[str] = None"},{name:"subfolder",val:": typing.Optional[str] = None"}],parametersDescription:[{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.repo_id",description:`<strong>repo_id</strong> (<code>str</code>) — | |
| The name of the repository you want to push your model, scheduler, or pipeline files to. It should | |
| contain your organization name when pushing to an organization. <code>repo_id</code> can also be a path to a local | |
| directory.`,name:"repo_id"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.commit_message",description:`<strong>commit_message</strong> (<code>str</code>, <em>optional</em>) — | |
| Message to commit while pushing. Default to <code>"Upload {object}"</code>.`,name:"commit_message"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.private",description:`<strong>private</strong> (<code>bool</code>, <em>optional</em>) — | |
| Whether to make the repo private. If <code>None</code> (default), the repo will be public unless the | |
| organization’s default is private. This value is ignored if the repo already exists.`,name:"private"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.token",description:`<strong>token</strong> (<code>str</code>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. The token generated when running <code>hf auth login</code> (stored in <code>~/.huggingface</code>).`,name:"token"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.create_pr",description:`<strong>create_pr</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to create a PR with the uploaded files or directly commit.`,name:"create_pr"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to convert the model weights to the <code>safetensors</code> format.`,name:"safe_serialization"},{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.variant",description:`<strong>variant</strong> (<code>str</code>, <em>optional</em>) — | |
| If specified, weights are saved in the format <code>pytorch_model.<variant>.bin</code>.`,name:"variant"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/utils/hub_utils.py#L500"}}),ge=new xe({props:{anchor:"diffusers.utils.PushToHubMixin.push_to_hub.example",$$slots:{default:[Ks]},$$scope:{ctx:U}}}),st=new en({props:{title:"Callbacks",local:"diffusers.callbacks.PipelineCallback",headingTag:"h2"}}),at=new k({props:{name:"class diffusers.callbacks.PipelineCallback",anchor:"diffusers.callbacks.PipelineCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L7"}}),rt=new k({props:{name:"class diffusers.callbacks.SDCFGCutoffCallback",anchor:"diffusers.callbacks.SDCFGCutoffCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L69"}}),lt=new k({props:{name:"class diffusers.callbacks.SDXLCFGCutoffCallback",anchor:"diffusers.callbacks.SDXLCFGCutoffCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L98"}}),dt=new k({props:{name:"class diffusers.callbacks.SDXLControlnetCFGCutoffCallback",anchor:"diffusers.callbacks.SDXLControlnetCFGCutoffCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L140"}}),ft=new k({props:{name:"class diffusers.callbacks.IPAdapterScaleCutoffCallback",anchor:"diffusers.callbacks.IPAdapterScaleCutoffCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = 
None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L188"}}),ct=new k({props:{name:"class diffusers.callbacks.SD3CFGCutoffCallback",anchor:"diffusers.callbacks.SD3CFGCutoffCallback",parameters:[{name:"cutoff_step_ratio",val:" = 1.0"},{name:"cutoff_step_index",val:" = None"}],source:"https://github.com/huggingface/diffusers/blob/vr_12625/src/diffusers/callbacks.py#L212"}}),pt=new Rs({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/overview.md"}}),{c(){l=s("meta"),C=n(),w=s("p"),f=n(),h(x.$$.fragment),o=n(),h($.$$.fragment),ko=n(),Me=s("p"),Me.textContent=ji,Co=n(),Te=s("p"),Te.innerHTML=Ii,Uo=n(),K=s("blockquote"),K.innerHTML=Ji,Po=n(),ke=s("p"),ke.textContent=Gi,Do=n(),Ce=s("table"),Ce.innerHTML=Li,Zo=n(),h(Ue.$$.fragment),jo=n(),u=s("div"),h(Pe.$$.fragment),tn=n(),gt=s("p"),gt.textContent=Hi,on=n(),bt=s("p"),bt.innerHTML=Wi,nn=n(),_t=s("ul"),_t.innerHTML=Si,sn=n(),vt=s("p"),vt.textContent=Vi,an=n(),yt=s("ul"),yt.innerHTML=Ni,rn=n(),ee=s("div"),h(De.$$.fragment),ln=n(),xt=s("p"),xt.textContent=Fi,dn=n(),wt=s("div"),h(Ze.$$.fragment),fn=n(),Z=s("div"),h(je.$$.fragment),cn=n(),$t=s("p"),$t.innerHTML=Bi,pn=n(),Ie=s("blockquote"),Ie.innerHTML=Xi,un=n(),Mt=s("p"),Mt.innerHTML=Ri,mn=n(),Tt=s("ul"),Tt.innerHTML=Ei,hn=n(),G=s("div"),h(Je.$$.fragment),gn=n(),kt=s("p"),kt.innerHTML=zi,bn=n(),Ct=s("p"),Ct.innerHTML=qi,_n=n(),h(te.$$.fragment),vn=n(),oe=s("div"),h(Ge.$$.fragment),yn=n(),Ut=s("p"),Ut.innerHTML=Yi,xn=n(),ne=s("div"),h(Le.$$.fragment),wn=n(),Pt=s("p"),Pt.innerHTML=Ai,$n=n(),q=s("div"),h(He.$$.fragment),Mn=n(),Dt=s("p"),Dt.textContent=Qi,Tn=n(),We=s("blockquote"),We.innerHTML=Oi,kn=n(),L=s("div"),h(Se.$$.fragment),Cn=n(),Zt=s("p"),Zt.textContent=Ki,Un=n(),Ve=s("blockquote"),Ve.innerHTML=es,Pn=n(),h(ie.$$.fragment),Dn=n(),P=s("div"),h(Ne.$$.fragment),Zn=n(),jt=s("p"),jt.textContent=ts,jn=n(),It=s("p"),It.textContent=os,In=n(),Jt=s("ul"),Jt.innerHTML=ns,Jn=n(),Gt=s("p"),Gt.in
nerHTML=is,Gn=n(),Lt=s("p"),Lt.textContent=ss,Ln=n(),h(se.$$.fragment),Hn=n(),ae=s("div"),h(Fe.$$.fragment),Wn=n(),Ht=s("p"),Ht.innerHTML=as,Sn=n(),re=s("div"),h(Be.$$.fragment),Vn=n(),Wt=s("p"),Wt.innerHTML=rs,Nn=n(),H=s("div"),h(Xe.$$.fragment),Fn=n(),St=s("p"),St.innerHTML=ls,Bn=n(),Re=s("blockquote"),Re.innerHTML=ds,Xn=n(),h(le.$$.fragment),Rn=n(),Y=s("div"),h(Ee.$$.fragment),En=n(),Vt=s("p"),Vt.textContent=fs,zn=n(),h(de.$$.fragment),qn=n(),D=s("div"),h(ze.$$.fragment),Yn=n(),Nt=s("p"),Nt.textContent=cs,An=n(),Ft=s("p"),Ft.innerHTML=ps,Qn=n(),h(fe.$$.fragment),On=n(),qe=s("blockquote"),qe.innerHTML=us,Kn=n(),h(ce.$$.fragment),ei=n(),W=s("div"),h(Ye.$$.fragment),ti=n(),Bt=s("p"),Bt.textContent=ms,oi=n(),Xt=s("ul"),Xt.innerHTML=hs,ni=n(),Rt=s("p"),Rt.innerHTML=gs,ii=n(),pe=s("div"),h(Ae.$$.fragment),si=n(),Et=s("p"),Et.textContent=bs,ai=n(),ue=s("div"),h(Qe.$$.fragment),ri=n(),zt=s("p"),zt.innerHTML=_s,li=n(),me=s("div"),h(Oe.$$.fragment),di=n(),qt=s("p"),qt.textContent=vs,fi=n(),he=s("div"),h(Ke.$$.fragment),ci=n(),Yt=s("p"),Yt.innerHTML=ys,Io=n(),I=s("div"),h(et.$$.fragment),pi=n(),At=s("p"),At.innerHTML=xs,ui=n(),Qt=s("p"),Qt.textContent=ws,mi=n(),Ot=s("p"),Ot.innerHTML=$s,Jo=n(),Q=s("div"),h(tt.$$.fragment),hi=n(),Kt=s("p"),Kt.textContent=Ms,Go=n(),h(ot.$$.fragment),Lo=n(),V=s("div"),h(nt.$$.fragment),gi=n(),eo=s("p"),eo.textContent=Ts,bi=n(),A=s("div"),h(it.$$.fragment),_i=n(),to=s("p"),to.textContent=ks,vi=n(),h(ge.$$.fragment),Ho=n(),h(st.$$.fragment),Wo=n(),N=s("div"),h(at.$$.fragment),yi=n(),oo=s("p"),oo.textContent=Cs,xi=n(),no=s("p"),no.innerHTML=Us,So=n(),F=s("div"),h(rt.$$.fragment),wi=n(),io=s("p"),io.innerHTML=Ps,$i=n(),so=s("p"),so.innerHTML=Ds,Vo=n(),B=s("div"),h(lt.$$.fragment),Mi=n(),ao=s("p"),ao.innerHTML=Zs,Ti=n(),ro=s("p"),ro.innerHTML=js,No=n(),X=s("div"),h(dt.$$.fragment),ki=n(),lo=s("p"),lo.innerHTML=Is,Ci=n(),fo=s("p"),fo.innerHTML=Js,Fo=n(),R=s("div"),h(ft.$$.fragment),Ui=n(),co=s("p"),co.innerHTML=Gs,Pi=n(),po=s("p"),po.textContent=Ls,
Bo=n(),E=s("div"),h(ct.$$.fragment),Di=n(),uo=s("p"),uo.innerHTML=Hs,Zi=n(),mo=s("p"),mo.innerHTML=Ws,Xo=n(),h(pt.$$.fragment),Ro=n(),Mo=s("p"),this.h()},l(e){const c=Bs("svelte-u9bgzb",document.head);l=a(c,"META",{name:!0,content:!0}),c.forEach(r),C=i(e),w=a(e,"P",{}),T(w).forEach(r),f=i(e),g(x.$$.fragment,e),o=i(e),g($.$$.fragment,e),ko=i(e),Me=a(e,"P",{"data-svelte-h":!0}),d(Me)!=="svelte-eejdx6"&&(Me.textContent=ji),Co=i(e),Te=a(e,"P",{"data-svelte-h":!0}),d(Te)!=="svelte-1wxf88t"&&(Te.innerHTML=Ii),Uo=i(e),K=a(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(K)!=="svelte-pc365r"&&(K.innerHTML=Ji),Po=i(e),ke=a(e,"P",{"data-svelte-h":!0}),d(ke)!=="svelte-2xd39c"&&(ke.textContent=Gi),Do=i(e),Ce=a(e,"TABLE",{"data-svelte-h":!0}),d(Ce)!=="svelte-1yiijew"&&(Ce.innerHTML=Li),Zo=i(e),g(Ue.$$.fragment,e),jo=i(e),u=a(e,"DIV",{class:!0});var m=T(u);g(Pe.$$.fragment,m),tn=i(m),gt=a(m,"P",{"data-svelte-h":!0}),d(gt)!=="svelte-1neg3rw"&&(gt.textContent=Hi),on=i(m),bt=a(m,"P",{"data-svelte-h":!0}),d(bt)!=="svelte-18l5d9m"&&(bt.innerHTML=Wi),nn=i(m),_t=a(m,"UL",{"data-svelte-h":!0}),d(_t)!=="svelte-2a5chx"&&(_t.innerHTML=Si),sn=i(m),vt=a(m,"P",{"data-svelte-h":!0}),d(vt)!=="svelte-wz8va1"&&(vt.textContent=Vi),an=i(m),yt=a(m,"UL",{"data-svelte-h":!0}),d(yt)!=="svelte-1ilr6sx"&&(yt.innerHTML=Ni),rn=i(m),ee=a(m,"DIV",{class:!0});var ut=T(ee);g(De.$$.fragment,ut),ln=i(ut),xt=a(ut,"P",{"data-svelte-h":!0}),d(xt)!=="svelte-1eed40t"&&(xt.textContent=Fi),ut.forEach(r),dn=i(m),wt=a(m,"DIV",{class:!0});var To=T(wt);g(Ze.$$.fragment,To),To.forEach(r),fn=i(m),Z=a(m,"DIV",{class:!0});var 
J=T(Z);g(je.$$.fragment,J),cn=i(J),$t=a(J,"P",{"data-svelte-h":!0}),d($t)!=="svelte-1vbhnip"&&($t.innerHTML=Bi),pn=i(J),Ie=a(J,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(Ie)!=="svelte-ja1ewp"&&(Ie.innerHTML=Xi),un=i(J),Mt=a(J,"P",{"data-svelte-h":!0}),d(Mt)!=="svelte-5ul9n2"&&(Mt.innerHTML=Ri),mn=i(J),Tt=a(J,"UL",{"data-svelte-h":!0}),d(Tt)!=="svelte-1icy6l9"&&(Tt.innerHTML=Ei),J.forEach(r),hn=i(m),G=a(m,"DIV",{class:!0});var z=T(G);g(Je.$$.fragment,z),gn=i(z),kt=a(z,"P",{"data-svelte-h":!0}),d(kt)!=="svelte-had4mb"&&(kt.innerHTML=zi),bn=i(z),Ct=a(z,"P",{"data-svelte-h":!0}),d(Ct)!=="svelte-19k67ce"&&(Ct.innerHTML=qi),_n=i(z),g(te.$$.fragment,z),z.forEach(r),vn=i(m),oe=a(m,"DIV",{class:!0});var mt=T(oe);g(Ge.$$.fragment,mt),yn=i(mt),Ut=a(mt,"P",{"data-svelte-h":!0}),d(Ut)!=="svelte-1lh0nh5"&&(Ut.innerHTML=Yi),mt.forEach(r),xn=i(m),ne=a(m,"DIV",{class:!0});var ht=T(ne);g(Le.$$.fragment,ht),wn=i(ht),Pt=a(ht,"P",{"data-svelte-h":!0}),d(Pt)!=="svelte-1vfte1e"&&(Pt.innerHTML=Ai),ht.forEach(r),$n=i(m),q=a(m,"DIV",{class:!0});var O=T(q);g(He.$$.fragment,O),Mn=i(O),Dt=a(O,"P",{"data-svelte-h":!0}),d(Dt)!=="svelte-1mfctah"&&(Dt.textContent=Qi),Tn=i(O),We=a(O,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(We)!=="svelte-n1m6lp"&&(We.innerHTML=Oi),O.forEach(r),kn=i(m),L=a(m,"DIV",{class:!0});var be=T(L);g(Se.$$.fragment,be),Cn=i(be),Zt=a(be,"P",{"data-svelte-h":!0}),d(Zt)!=="svelte-10jaql7"&&(Zt.textContent=Ki),Un=i(be),Ve=a(be,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(Ve)!=="svelte-zc9fvp"&&(Ve.innerHTML=es),Pn=i(be),g(ie.$$.fragment,be),be.forEach(r),Dn=i(m),P=a(m,"DIV",{class:!0});var 
j=T(P);g(Ne.$$.fragment,j),Zn=i(j),jt=a(j,"P",{"data-svelte-h":!0}),d(jt)!=="svelte-1umao2b"&&(jt.textContent=ts),jn=i(j),It=a(j,"P",{"data-svelte-h":!0}),d(It)!=="svelte-1b92jfk"&&(It.textContent=os),In=i(j),Jt=a(j,"UL",{"data-svelte-h":!0}),d(Jt)!=="svelte-1n0e5q1"&&(Jt.innerHTML=ns),Jn=i(j),Gt=a(j,"P",{"data-svelte-h":!0}),d(Gt)!=="svelte-1yl7dpl"&&(Gt.innerHTML=is),Gn=i(j),Lt=a(j,"P",{"data-svelte-h":!0}),d(Lt)!=="svelte-v0d0nn"&&(Lt.textContent=ss),Ln=i(j),g(se.$$.fragment,j),j.forEach(r),Hn=i(m),ae=a(m,"DIV",{class:!0});var zo=T(ae);g(Fe.$$.fragment,zo),Wn=i(zo),Ht=a(zo,"P",{"data-svelte-h":!0}),d(Ht)!=="svelte-9hgqai"&&(Ht.innerHTML=as),zo.forEach(r),Sn=i(m),re=a(m,"DIV",{class:!0});var qo=T(re);g(Be.$$.fragment,qo),Vn=i(qo),Wt=a(qo,"P",{"data-svelte-h":!0}),d(Wt)!=="svelte-1bs0ixe"&&(Wt.innerHTML=rs),qo.forEach(r),Nn=i(m),H=a(m,"DIV",{class:!0});var _e=T(H);g(Xe.$$.fragment,_e),Fn=i(_e),St=a(_e,"P",{"data-svelte-h":!0}),d(St)!=="svelte-e03q3e"&&(St.innerHTML=ls),Bn=i(_e),Re=a(_e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(Re)!=="svelte-3y0te0"&&(Re.innerHTML=ds),Xn=i(_e),g(le.$$.fragment,_e),_e.forEach(r),Rn=i(m),Y=a(m,"DIV",{class:!0});var ho=T(Y);g(Ee.$$.fragment,ho),En=i(ho),Vt=a(ho,"P",{"data-svelte-h":!0}),d(Vt)!=="svelte-1jx3icn"&&(Vt.textContent=fs),zn=i(ho),g(de.$$.fragment,ho),ho.forEach(r),qn=i(m),D=a(m,"DIV",{class:!0});var S=T(D);g(ze.$$.fragment,S),Yn=i(S),Nt=a(S,"P",{"data-svelte-h":!0}),d(Nt)!=="svelte-ccbjek"&&(Nt.textContent=cs),An=i(S),Ft=a(S,"P",{"data-svelte-h":!0}),d(Ft)!=="svelte-1p5vgmd"&&(Ft.innerHTML=ps),Qn=i(S),g(fe.$$.fragment,S),On=i(S),qe=a(S,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(qe)!=="svelte-zg8xkk"&&(qe.innerHTML=us),Kn=i(S),g(ce.$$.fragment,S),S.forEach(r),ei=i(m),W=a(m,"DIV",{class:!0});var 
ve=T(W);g(Ye.$$.fragment,ve),ti=i(ve),Bt=a(ve,"P",{"data-svelte-h":!0}),d(Bt)!=="svelte-z79kjq"&&(Bt.textContent=ms),oi=i(ve),Xt=a(ve,"UL",{"data-svelte-h":!0}),d(Xt)!=="svelte-1ju7puc"&&(Xt.innerHTML=hs),ni=i(ve),Rt=a(ve,"P",{"data-svelte-h":!0}),d(Rt)!=="svelte-1pqppti"&&(Rt.innerHTML=gs),ve.forEach(r),ii=i(m),pe=a(m,"DIV",{class:!0});var Yo=T(pe);g(Ae.$$.fragment,Yo),si=i(Yo),Et=a(Yo,"P",{"data-svelte-h":!0}),d(Et)!=="svelte-1gut907"&&(Et.textContent=bs),Yo.forEach(r),ai=i(m),ue=a(m,"DIV",{class:!0});var Ao=T(ue);g(Qe.$$.fragment,Ao),ri=i(Ao),zt=a(Ao,"P",{"data-svelte-h":!0}),d(zt)!=="svelte-8kophh"&&(zt.innerHTML=_s),Ao.forEach(r),li=i(m),me=a(m,"DIV",{class:!0});var Qo=T(me);g(Oe.$$.fragment,Qo),di=i(Qo),qt=a(Qo,"P",{"data-svelte-h":!0}),d(qt)!=="svelte-6tjysh"&&(qt.textContent=vs),Qo.forEach(r),fi=i(m),he=a(m,"DIV",{class:!0});var Oo=T(he);g(Ke.$$.fragment,Oo),ci=i(Oo),Yt=a(Oo,"P",{"data-svelte-h":!0}),d(Yt)!=="svelte-1r2mfwe"&&(Yt.innerHTML=ys),Oo.forEach(r),m.forEach(r),Io=i(e),I=a(e,"DIV",{class:!0});var ye=T(I);g(et.$$.fragment,ye),pi=i(ye),At=a(ye,"P",{"data-svelte-h":!0}),d(At)!=="svelte-192jnkh"&&(At.innerHTML=xs),ui=i(ye),Qt=a(ye,"P",{"data-svelte-h":!0}),d(Qt)!=="svelte-xjijlp"&&(Qt.textContent=ws),mi=i(ye),Ot=a(ye,"P",{"data-svelte-h":!0}),d(Ot)!=="svelte-hy0nzs"&&(Ot.innerHTML=$s),ye.forEach(r),Jo=i(e),Q=a(e,"DIV",{class:!0});var Ko=T(Q);g(tt.$$.fragment,Ko),hi=i(Ko),Kt=a(Ko,"P",{"data-svelte-h":!0}),d(Kt)!=="svelte-psupw6"&&(Kt.textContent=Ms),Ko.forEach(r),Go=i(e),g(ot.$$.fragment,e),Lo=i(e),V=a(e,"DIV",{class:!0});var go=T(V);g(nt.$$.fragment,go),gi=i(go),eo=a(go,"P",{"data-svelte-h":!0}),d(eo)!=="svelte-7y8zei"&&(eo.textContent=Ts),bi=i(go),A=a(go,"DIV",{class:!0});var bo=T(A);g(it.$$.fragment,bo),_i=i(bo),to=a(bo,"P",{"data-svelte-h":!0}),d(to)!=="svelte-5nfhmh"&&(to.textContent=ks),vi=i(bo),g(ge.$$.fragment,bo),bo.forEach(r),go.forEach(r),Ho=i(e),g(st.$$.fragment,e),Wo=i(e),N=a(e,"DIV",{class:!0});var 
_o=T(N);g(at.$$.fragment,_o),yi=i(_o),oo=a(_o,"P",{"data-svelte-h":!0}),d(oo)!=="svelte-2ua579"&&(oo.textContent=Cs),xi=i(_o),no=a(_o,"P",{"data-svelte-h":!0}),d(no)!=="svelte-qrped9"&&(no.innerHTML=Us),_o.forEach(r),So=i(e),F=a(e,"DIV",{class:!0});var vo=T(F);g(rt.$$.fragment,vo),wi=i(vo),io=a(vo,"P",{"data-svelte-h":!0}),d(io)!=="svelte-1lxhnsk"&&(io.innerHTML=Ps),$i=i(vo),so=a(vo,"P",{"data-svelte-h":!0}),d(so)!=="svelte-twxvjk"&&(so.innerHTML=Ds),vo.forEach(r),Vo=i(e),B=a(e,"DIV",{class:!0});var yo=T(B);g(lt.$$.fragment,yo),Mi=i(yo),ao=a(yo,"P",{"data-svelte-h":!0}),d(ao)!=="svelte-1pnjwf0"&&(ao.innerHTML=Zs),Ti=i(yo),ro=a(yo,"P",{"data-svelte-h":!0}),d(ro)!=="svelte-twxvjk"&&(ro.innerHTML=js),yo.forEach(r),No=i(e),X=a(e,"DIV",{class:!0});var xo=T(X);g(dt.$$.fragment,xo),ki=i(xo),lo=a(xo,"P",{"data-svelte-h":!0}),d(lo)!=="svelte-wsvz4z"&&(lo.innerHTML=Is),Ci=i(xo),fo=a(xo,"P",{"data-svelte-h":!0}),d(fo)!=="svelte-twxvjk"&&(fo.innerHTML=Js),xo.forEach(r),Fo=i(e),R=a(e,"DIV",{class:!0});var wo=T(R);g(ft.$$.fragment,wo),Ui=i(wo),co=a(wo,"P",{"data-svelte-h":!0}),d(co)!=="svelte-1pf8icy"&&(co.innerHTML=Gs),Pi=i(wo),po=a(wo,"P",{"data-svelte-h":!0}),d(po)!=="svelte-1kg1eq5"&&(po.textContent=Ls),wo.forEach(r),Bo=i(e),E=a(e,"DIV",{class:!0});var $o=T(E);g(ct.$$.fragment,$o),Di=i($o),uo=a($o,"P",{"data-svelte-h":!0}),d(uo)!=="svelte-461fyb"&&(uo.innerHTML=Hs),Zi=i($o),mo=a($o,"P",{"data-svelte-h":!0}),d(mo)!=="svelte-twxvjk"&&(mo.innerHTML=Ws),$o.forEach(r),Xo=i(e),g(pt.$$.fragment,e),Ro=i(e),Mo=a(e,"P",{}),T(Mo).forEach(r),this.h()},h(){M(l,"name","hf:doc:metadata"),M(l,"content",ta),M(K,"class","warning"),M(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(wt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Ie,"class","tip"),M(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(G,"class","docstring border-l-2 border-t-2 pl-4 
pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(We,"class","tip"),M(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Ve,"class","warning"),M(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Re,"class","warning"),M(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(qe,"class","tip"),M(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(u,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),M(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,c){t(document.head,l),p(e,C,c),p(e,w,c),p(e,f,c),b(x,e,c),p(e,o,c),b($,e,c),p(e,ko,c),p(e,Me,c),p(e,Co,c),p(e,Te,c),p(e,Uo,c),p(e,K,c),p(e,Po,c),p(e,ke,c),p(e,Do,c),p(e,Ce,c),p(e,Zo,c),b(Ue,e,c),p(e,jo,c),p(e,u,c),b(Pe,u,null),t(u,tn),t(u,gt),t(u,on),t(u,bt),t(u,nn),t(u,_t),t(u,sn),t(u,vt),t(u,an),t(u,yt),t(u,rn),t(u,ee),b(De,ee,null),t(ee,ln),t(ee,xt),t(u,dn),t(u,wt),b(Ze,wt,null),t(u,fn),t(u,Z),b(je,Z,null),t(Z,cn),t(Z,$t),t(Z,pn),t(Z,Ie),t(Z,un),t(Z,Mt),t(Z,mn),t(Z,Tt),t(u,hn),t(u,G),b(Je,G,null),t(G,gn),t(G,kt),t(G,bn),t(G,Ct),t(G,_n),b(te,G,null),t(u,vn),t(u,oe),b(Ge,oe,null),t(oe,yn),t(oe,Ut),t(u,xn),t(u,ne),b(Le,ne,null),t(ne,wn),t(ne,Pt),t(u,$n),t(u,q),b(He,q,null),t(q,Mn),t(q,Dt),t(q,Tn),t(q,We),t(u,kn),t(u,L),b(Se,L,null),t(L,Cn),t(L,Zt),t(L,Un),t(L,Ve),t(L,Pn),b(ie,L,null),t(u,Dn),t(u,P),b(Ne,P,null),t(P,Zn),t(P,jt),t(P,jn),t(P,It),t(P,In),t(P,Jt),t(P,Jn),t(P,Gt),t(P,Gn),t(P,Lt),t(P,Ln),b(se,P,null),t(u,Hn),t(u,ae),b(Fe,ae,null),t(ae,Wn),t(ae,Ht),t(u,Sn),t(u,re),b(Be,re,null),t(re,Vn),t(re,Wt),t(u,Nn),t(u,H),b(Xe,H,null),t(H,Fn),t(H,St),t(H,Bn),t(H,Re),t(H,Xn),b(le,H,null),t(u,Rn),t(u,Y),b(Ee,Y,null),t(Y,En),t(Y,Vt),t(Y,zn),b(de,Y,null),t(u,qn),t(u,D),b(ze,D,null),t(D,Yn),t(D,Nt),t(D,An),t(D,Ft),t(D,Qn),b(fe,D,null),t(D,On),t(D,qe),t(D,Kn),b(ce,
D,null),t(u,ei),t(u,W),b(Ye,W,null),t(W,ti),t(W,Bt),t(W,oi),t(W,Xt),t(W,ni),t(W,Rt),t(u,ii),t(u,pe),b(Ae,pe,null),t(pe,si),t(pe,Et),t(u,ai),t(u,ue),b(Qe,ue,null),t(ue,ri),t(ue,zt),t(u,li),t(u,me),b(Oe,me,null),t(me,di),t(me,qt),t(u,fi),t(u,he),b(Ke,he,null),t(he,ci),t(he,Yt),p(e,Io,c),p(e,I,c),b(et,I,null),t(I,pi),t(I,At),t(I,ui),t(I,Qt),t(I,mi),t(I,Ot),p(e,Jo,c),p(e,Q,c),b(tt,Q,null),t(Q,hi),t(Q,Kt),p(e,Go,c),b(ot,e,c),p(e,Lo,c),p(e,V,c),b(nt,V,null),t(V,gi),t(V,eo),t(V,bi),t(V,A),b(it,A,null),t(A,_i),t(A,to),t(A,vi),b(ge,A,null),p(e,Ho,c),b(st,e,c),p(e,Wo,c),p(e,N,c),b(at,N,null),t(N,yi),t(N,oo),t(N,xi),t(N,no),p(e,So,c),p(e,F,c),b(rt,F,null),t(F,wi),t(F,io),t(F,$i),t(F,so),p(e,Vo,c),p(e,B,c),b(lt,B,null),t(B,Mi),t(B,ao),t(B,Ti),t(B,ro),p(e,No,c),p(e,X,c),b(dt,X,null),t(X,ki),t(X,lo),t(X,Ci),t(X,fo),p(e,Fo,c),p(e,R,c),b(ft,R,null),t(R,Ui),t(R,co),t(R,Pi),t(R,po),p(e,Bo,c),p(e,E,c),b(ct,E,null),t(E,Di),t(E,uo),t(E,Zi),t(E,mo),p(e,Xo,c),b(pt,e,c),p(e,Ro,c),p(e,Mo,c),Eo=!0},p(e,[c]){const m={};c&2&&(m.$$scope={dirty:c,ctx:e}),te.$set(m);const ut={};c&2&&(ut.$$scope={dirty:c,ctx:e}),ie.$set(ut);const To={};c&2&&(To.$$scope={dirty:c,ctx:e}),se.$set(To);const J={};c&2&&(J.$$scope={dirty:c,ctx:e}),le.$set(J);const z={};c&2&&(z.$$scope={dirty:c,ctx:e}),de.$set(z);const mt={};c&2&&(mt.$$scope={dirty:c,ctx:e}),fe.$set(mt);const ht={};c&2&&(ht.$$scope={dirty:c,ctx:e}),ce.$set(ht);const 
O={};c&2&&(O.$$scope={dirty:c,ctx:e}),ge.$set(O)},i(e){Eo||(_(x.$$.fragment,e),_($.$$.fragment,e),_(Ue.$$.fragment,e),_(Pe.$$.fragment,e),_(De.$$.fragment,e),_(Ze.$$.fragment,e),_(je.$$.fragment,e),_(Je.$$.fragment,e),_(te.$$.fragment,e),_(Ge.$$.fragment,e),_(Le.$$.fragment,e),_(He.$$.fragment,e),_(Se.$$.fragment,e),_(ie.$$.fragment,e),_(Ne.$$.fragment,e),_(se.$$.fragment,e),_(Fe.$$.fragment,e),_(Be.$$.fragment,e),_(Xe.$$.fragment,e),_(le.$$.fragment,e),_(Ee.$$.fragment,e),_(de.$$.fragment,e),_(ze.$$.fragment,e),_(fe.$$.fragment,e),_(ce.$$.fragment,e),_(Ye.$$.fragment,e),_(Ae.$$.fragment,e),_(Qe.$$.fragment,e),_(Oe.$$.fragment,e),_(Ke.$$.fragment,e),_(et.$$.fragment,e),_(tt.$$.fragment,e),_(ot.$$.fragment,e),_(nt.$$.fragment,e),_(it.$$.fragment,e),_(ge.$$.fragment,e),_(st.$$.fragment,e),_(at.$$.fragment,e),_(rt.$$.fragment,e),_(lt.$$.fragment,e),_(dt.$$.fragment,e),_(ft.$$.fragment,e),_(ct.$$.fragment,e),_(pt.$$.fragment,e),Eo=!0)},o(e){v(x.$$.fragment,e),v($.$$.fragment,e),v(Ue.$$.fragment,e),v(Pe.$$.fragment,e),v(De.$$.fragment,e),v(Ze.$$.fragment,e),v(je.$$.fragment,e),v(Je.$$.fragment,e),v(te.$$.fragment,e),v(Ge.$$.fragment,e),v(Le.$$.fragment,e),v(He.$$.fragment,e),v(Se.$$.fragment,e),v(ie.$$.fragment,e),v(Ne.$$.fragment,e),v(se.$$.fragment,e),v(Fe.$$.fragment,e),v(Be.$$.fragment,e),v(Xe.$$.fragment,e),v(le.$$.fragment,e),v(Ee.$$.fragment,e),v(de.$$.fragment,e),v(ze.$$.fragment,e),v(fe.$$.fragment,e),v(ce.$$.fragment,e),v(Ye.$$.fragment,e),v(Ae.$$.fragment,e),v(Qe.$$.fragment,e),v(Oe.$$.fragment,e),v(Ke.$$.fragment,e),v(et.$$.fragment,e),v(tt.$$.fragment,e),v(ot.$$.fragment,e),v(nt.$$.fragment,e),v(it.$$.fragment,e),v(ge.$$.fragment,e),v(st.$$.fragment,e),v(at.$$.fragment,e),v(rt.$$.fragment,e),v(lt.$$.fragment,e),v(dt.$$.fragment,e),v(ft.$$.fragment,e),v(ct.$$.fragment,e),v(pt.$$.fragment,e),Eo=!1},d(e){e&&(r(C),r(w),r(f),r(o),r(ko),r(Me),r(Co),r(Te),r(Uo),r(K),r(Po),r(ke),r(Do),r(Ce),r(Zo),r(jo),r(u),r(Io),r(I),r(Jo),r(Q),r(Go),r(Lo),r(V),r(Ho),r(Wo),r(N),r(S
o),r(F),r(Vo),r(B),r(No),r(X),r(Fo),r(R),r(Bo),r(E),r(Xo),r(Ro),r(Mo)),r(l),y(x,e),y($,e),y(Ue,e),y(Pe),y(De),y(Ze),y(je),y(Je),y(te),y(Ge),y(Le),y(He),y(Se),y(ie),y(Ne),y(se),y(Fe),y(Be),y(Xe),y(le),y(Ee),y(de),y(ze),y(fe),y(ce),y(Ye),y(Ae),y(Qe),y(Oe),y(Ke),y(et),y(tt),y(ot,e),y(nt),y(it),y(ge),y(st,e),y(at),y(rt),y(lt),y(dt),y(ft),y(ct),y(pt,e)}}}const ta='{"title":"Pipelines","local":"pipelines","sections":[{"title":"DiffusionPipeline","local":"diffusers.DiffusionPipeline","sections":[],"depth":2},{"title":"PushToHubMixin","local":"diffusers.utils.PushToHubMixin","sections":[],"depth":2},{"title":"Callbacks","local":"diffusers.callbacks.PipelineCallback","sections":[],"depth":2}],"depth":1}';function oa(U){return Vs(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class fa extends Ns{constructor(l){super(),Fs(this,l,oa,ea,Ss,{})}}export{fa as component};/*
 * NOTE(review): Generated, minified Svelte build output — do NOT hand-edit.
 * Regenerate via the site's build pipeline instead; line breaks above fall
 * mid-token (the wrap is arbitrary), so manual edits will corrupt the bundle.
 *
 * What this tail contains (the enclosing create_fragment factory begins
 * before this chunk, so only its closing closures are visible here):
 *  - m(e,c): mount — inserts the rendered nodes into target `e` and mounts
 *    the child Docstring/CodeBlock components (b = mount helper).
 *  - p(e,[c]): update — propagates $$scope re-renders to slotted example
 *    blocks when dirty bit 2 is set (c&2).
 *  - i(e)/o(e): intro/outro — toggle transitions on every child fragment.
 *  - d(e): destroy — detaches nodes (r) and destroys child components (y).
 *  - ta: page table-of-contents metadata (JSON string; titles match the
 *    "Pipelines" docs page).
 *  - oa(U): instance init — schedules (via Vs, presumably an onMount-style
 *    hook — confirm against scheduler chunk) a read of the `fw` query
 *    parameter; returns an empty ctx array.
 *  - fa: the SvelteKit page component class exported as `component`;
 *    `ea` (create_fragment) and the Ss/Ns/Fs runtime helpers are defined
 *    earlier in this bundle.
 */
Xet Storage Details
- Size:
- 102 kB
- Xet hash:
- e023a58b6fa79c3eac4a56cdb9a069fd83156e0b2a7f62fdfc03565a187e0d3e
Xet stores files efficiently by splitting them into unique, deduplicated chunks, which accelerates both uploads and downloads. More info.