Buckets:
| import{s as Ui,o as Fi,n as N}from"../chunks/scheduler.8c3d61f6.js";import{S as Vi,i as Xi,g as n,s as t,r as _,A as Ni,h as s,f as d,c as r,j as v,u as m,x as f,k as w,y as o,a as L,v as u,d as h,t as g,w as x}from"../chunks/index.da70eac4.js";import{T as Y}from"../chunks/Tip.1d9b8c37.js";import{D as $}from"../chunks/Docstring.6b390b9a.js";import{C as ua}from"../chunks/CodeBlock.00a903b3.js";import{E as ma}from"../chunks/ExampleCodeBlock.db12be95.js";import{H as me,E as Wi}from"../chunks/EditOnGithub.1e64e623.js";function zi(D){let a,b='To learn more about how to load LoRA weights, see the <a href="../../using-diffusers/loading_adapters#lora">LoRA</a> loading guide.';return{c(){a=n("p"),a.innerHTML=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-1fw6lx1"&&(a.innerHTML=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function qi(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function Bi(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function ji(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the 
future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function Gi(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function Ji(D){let a,b="This is an experimental API.";return{c(){a=n("p"),a.textContent=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-8w79b9"&&(a.textContent=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function Zi(D){let a,b="Examples:",c,l,y;return l=new ua({props:{code:"JTIzJTIwQXNzdW1pbmclMjAlNjBwaXBlbGluZSU2MCUyMGlzJTIwYWxyZWFkeSUyMGxvYWRlZCUyMHdpdGglMjB0aGUlMjBMb1JBJTIwcGFyYW1ldGVycy4lMEFwaXBlbGluZS51bmxvYWRfbG9yYV93ZWlnaHRzKCklMEEuLi4=",highlighted:'<span class="hljs-meta">>>> </span><span class="hljs-comment"># Assuming `pipeline` is already loaded with the LoRA parameters.</span>\n<span class="hljs-meta">>>> </span>pipeline.unload_lora_weights()\n<span class="hljs-meta">>>> </span>...',wrap:!1}}),{c(){a=n("p"),a.textContent=b,c=t(),_(l.$$.fragment)},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-kvfsh7"&&(a.textContent=b),c=r(i),m(l.$$.fragment,i)},m(i,M){L(i,a,M),L(i,c,M),u(l,i,M),y=!0},p:N,i(i){y||(h(l.$$.fragment,i),y=!0)},o(i){g(l.$$.fragment,i),y=!1},d(i){i&&(d(a),d(c)),x(l,i)}}}function Yi(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the 
future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function Qi(D){let a,b="This is an experimental API.";return{c(){a=n("p"),a.textContent=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-8w79b9"&&(a.textContent=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function Oi(D){let a,b="We support loading A1111 formatted LoRA checkpoints in a limited capacity.",c,l,y="This function is experimental and might change in the future.";return{c(){a=n("p"),a.textContent=b,c=t(),l=n("p"),l.textContent=y},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-15l1sdn"&&(a.textContent=b),c=r(i),l=s(i,"P",{"data-svelte-h":!0}),f(l)!=="svelte-3fufvn"&&(l.textContent=y)},m(i,M){L(i,a,M),L(i,c,M),L(i,l,M)},p:N,d(i){i&&(d(a),d(c),d(l))}}}function Ki(D){let a,b="This is an experimental API.";return{c(){a=n("p"),a.textContent=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-8w79b9"&&(a.textContent=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function ed(D){let a,b="This is an experimental API.";return{c(){a=n("p"),a.textContent=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-8w79b9"&&(a.textContent=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function od(D){let a,b="Example:",c,l,y;return l=new 
ua({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEFwaXBlbGluZSUyMCUzRCUyMERpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtYmFzZS0xLjAlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpLnRvKCUyMmN1ZGElMjIpJTBBcGlwZWxpbmUubG9hZF9sb3JhX3dlaWdodHMoJTIybmVyaWpzJTJGcGl4ZWwtYXJ0LXhsJTIyJTJDJTIwd2VpZ2h0X25hbWUlM0QlMjJwaXhlbC1hcnQteGwuc2FmZXRlbnNvcnMlMjIlMkMlMjBhZGFwdGVyX25hbWUlM0QlMjJwaXhlbCUyMiklMEFwaXBlbGluZS5mdXNlX2xvcmEobG9yYV9zY2FsZSUzRDAuNyk=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-keyword">import</span> torch | |
| pipeline = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"stabilityai/stable-diffusion-xl-base-1.0"</span>, torch_dtype=torch.float16 | |
| ).to(<span class="hljs-string">"cuda"</span>) | |
| pipeline.load_lora_weights(<span class="hljs-string">"nerijs/pixel-art-xl"</span>, weight_name=<span class="hljs-string">"pixel-art-xl.safetensors"</span>, adapter_name=<span class="hljs-string">"pixel"</span>) | |
| pipeline.fuse_lora(lora_scale=<span class="hljs-number">0.7</span>)`,wrap:!1}}),{c(){a=n("p"),a.textContent=b,c=t(),_(l.$$.fragment)},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-11lpom8"&&(a.textContent=b),c=r(i),m(l.$$.fragment,i)},m(i,M){L(i,a,M),L(i,c,M),u(l,i,M),y=!0},p:N,i(i){y||(h(l.$$.fragment,i),y=!0)},o(i){g(l.$$.fragment,i),y=!1},d(i){i&&(d(a),d(c)),x(l,i)}}}function td(D){let a,b="Example:",c,l,y;return l=new ua({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTBBJTBBcGlwZWxpbmUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLXhsLWJhc2UtMS4wJTIyJTJDJTBBKS50byglMjJjdWRhJTIyKSUwQXBpcGVsaW5lLmxvYWRfbG9yYV93ZWlnaHRzKCUyMkNpcm9OMjAyMiUyRnRveS1mYWNlJTIyJTJDJTIwd2VpZ2h0X25hbWUlM0QlMjJ0b3lfZmFjZV9zZHhsLnNhZmV0ZW5zb3JzJTIyJTJDJTIwYWRhcHRlcl9uYW1lJTNEJTIydG95JTIyKSUwQXBpcGVsaW5lLmdldF9hY3RpdmVfYWRhcHRlcnMoKQ==",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| pipeline = DiffusionPipeline.from_pretrained( | |
| <span class="hljs-string">"stabilityai/stable-diffusion-xl-base-1.0"</span>, | |
| ).to(<span class="hljs-string">"cuda"</span>) | |
| pipeline.load_lora_weights(<span class="hljs-string">"CiroN2022/toy-face"</span>, weight_name=<span class="hljs-string">"toy_face_sdxl.safetensors"</span>, adapter_name=<span class="hljs-string">"toy"</span>) | |
| pipeline.get_active_adapters()`,wrap:!1}}),{c(){a=n("p"),a.textContent=b,c=t(),_(l.$$.fragment)},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-11lpom8"&&(a.textContent=b),c=r(i),m(l.$$.fragment,i)},m(i,M){L(i,a,M),L(i,c,M),u(l,i,M),y=!0},p:N,i(i){y||(h(l.$$.fragment,i),y=!0)},o(i){g(l.$$.fragment,i),y=!1},d(i){i&&(d(a),d(c)),x(l,i)}}}function rd(D){let a,b="This is an experimental API.";return{c(){a=n("p"),a.textContent=b},l(c){a=s(c,"P",{"data-svelte-h":!0}),f(a)!=="svelte-8w79b9"&&(a.textContent=b)},m(c,l){L(c,a,l)},p:N,d(c){c&&d(a)}}}function ad(D){let a,b="Examples:",c,l,y;return l=new ua({props:{code:"JTIzJTIwQXNzdW1pbmclMjAlNjBwaXBlbGluZSU2MCUyMGlzJTIwYWxyZWFkeSUyMGxvYWRlZCUyMHdpdGglMjB0aGUlMjBMb1JBJTIwcGFyYW1ldGVycy4lMEFwaXBlbGluZS51bmxvYWRfbG9yYV93ZWlnaHRzKCklMEEuLi4=",highlighted:'<span class="hljs-meta">>>> </span><span class="hljs-comment"># Assuming `pipeline` is already loaded with the LoRA parameters.</span>\n<span class="hljs-meta">>>> </span>pipeline.unload_lora_weights()\n<span class="hljs-meta">>>> </span>...',wrap:!1}}),{c(){a=n("p"),a.textContent=b,c=t(),_(l.$$.fragment)},l(i){a=s(i,"P",{"data-svelte-h":!0}),f(a)!=="svelte-kvfsh7"&&(a.textContent=b),c=r(i),m(l.$$.fragment,i)},m(i,M){L(i,a,M),L(i,c,M),u(l,i,M),y=!0},p:N,i(i){y||(h(l.$$.fragment,i),y=!0)},o(i){g(l.$$.fragment,i),y=!1},d(i){i&&(d(a),d(c)),x(l,i)}}}function nd(D){let a,b,c,l,y,i,M,Ds='LoRA is a fast and lightweight training method that inserts and trains a significantly smaller number of parameters instead of all the model parameters. This produces a smaller file (~100 MBs) and makes it easier to quickly train a model to learn a new concept. LoRA weights are typically loaded into the denoiser, text encoder or both. 
The denoiser usually corresponds to a UNet (<a href="/docs/diffusers/pr_10725/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>, for example) or a Transformer (<a href="/docs/diffusers/pr_10725/en/api/models/sd3_transformer2d#diffusers.SD3Transformer2DModel">SD3Transformer2DModel</a>, for example). There are several classes for loading LoRA weights:',Pr,Ke,Ts='<li><code>StableDiffusionLoraLoaderMixin</code> provides functions for loading and unloading, fusing and unfusing, enabling and disabling, and more functions for managing LoRA weights. This class can be used with any model.</li> <li><code>StableDiffusionXLLoraLoaderMixin</code> is a <a href="../../api/pipelines/stable_diffusion/stable_diffusion_xl">Stable Diffusion (SDXL)</a> version of the <code>StableDiffusionLoraLoaderMixin</code> class for loading and saving LoRA weights. It can only be used with the SDXL model.</li> <li><code>SD3LoraLoaderMixin</code> provides similar functions for <a href="https://huggingface.co/blog/sd3" rel="nofollow">Stable Diffusion 3</a>.</li> <li><code>FluxLoraLoaderMixin</code> provides similar functions for <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux" rel="nofollow">Flux</a>.</li> <li><code>CogVideoXLoraLoaderMixin</code> provides similar functions for <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox" rel="nofollow">CogVideoX</a>.</li> <li><code>Mochi1LoraLoaderMixin</code> provides similar functions for <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/mochi" rel="nofollow">Mochi</a>.</li> <li><code>AmusedLoraLoaderMixin</code> is for the <a href="/docs/diffusers/pr_10725/en/api/pipelines/amused#diffusers.AmusedPipeline">AmusedPipeline</a>.</li> <li><code>LoraBaseMixin</code> provides a base class with several utility methods to fuse, unfuse, unload, LoRAs and more.</li>',Ir,ue,Hr,eo,Er,A,oo,ha,pt,Ss=`Load LoRA layers into Stable Diffusion <a 
href="/docs/diffusers/pr_10725/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a> and | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow"><code>CLIPTextModel</code></a>.`,ga,he,to,xa,_t,Cs="This will load the LoRA layers specified in <code>state_dict</code> into <code>text_encoder</code>",La,ge,ro,ba,mt,ks="This will load the LoRA layers specified in <code>state_dict</code> into <code>unet</code>.",va,V,ao,wa,ut,As=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.unet</code> and | |
| <code>self.text_encoder</code>.`,$a,ht,Rs="All kwargs are forwarded to <code>self.lora_state_dict</code>.",Ma,gt,Ps=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is | |
| loaded.`,ya,xt,Is=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet">load_lora_into_unet()</a> for more details on how the state dict is | |
| loaded into <code>self.unet</code>.`,Da,Lt,Hs=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder">load_lora_into_text_encoder()</a> for more details on how the state | |
| dict is loaded into <code>self.text_encoder</code>.`,Ta,Q,no,Sa,bt,Es="Return state dict for lora weights and the network alphas.",Ca,xe,ka,Le,so,Aa,vt,Us="Save the LoRA parameters corresponding to the UNet and text encoder.",Ur,io,Fr,R,lo,Ra,wt,Fs=`Load LoRA layers into Stable Diffusion XL <a href="/docs/diffusers/pr_10725/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>, | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow"><code>CLIPTextModel</code></a>, and | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow"><code>CLIPTextModelWithProjection</code></a>.`,Pa,be,co,Ia,$t,Vs="This will load the LoRA layers specified in <code>state_dict</code> into <code>text_encoder</code>",Ha,ve,fo,Ea,Mt,Xs="This will load the LoRA layers specified in <code>state_dict</code> into <code>unet</code>.",Ua,X,po,Fa,yt,Ns=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.unet</code> and | |
| <code>self.text_encoder</code>.`,Va,Dt,Ws="All kwargs are forwarded to <code>self.lora_state_dict</code>.",Xa,Tt,zs=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is | |
| loaded.`,Na,St,qs=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet">load_lora_into_unet()</a> for more details on how the state dict is | |
| loaded into <code>self.unet</code>.`,Wa,Ct,Bs=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder">load_lora_into_text_encoder()</a> for more details on how the state | |
| dict is loaded into <code>self.text_encoder</code>.`,za,O,_o,qa,kt,js="Return state dict for lora weights and the network alphas.",Ba,we,ja,$e,mo,Ga,At,Gs="Save the LoRA parameters corresponding to the UNet and text encoder.",Vr,uo,Xr,C,ho,Ja,Rt,Js=`Load LoRA layers into <a href="/docs/diffusers/pr_10725/en/api/models/sd3_transformer2d#diffusers.SD3Transformer2DModel">SD3Transformer2DModel</a>, | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow"><code>CLIPTextModel</code></a>, and | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow"><code>CLIPTextModelWithProjection</code></a>.`,Za,Pt,Zs='Specific to <a href="/docs/diffusers/pr_10725/en/api/pipelines/stable_diffusion/stable_diffusion_3#diffusers.StableDiffusion3Pipeline">StableDiffusion3Pipeline</a>.',Ya,Me,go,Qa,It,Ys="This will load the LoRA layers specified in <code>state_dict</code> into <code>text_encoder</code>",Oa,ye,xo,Ka,Ht,Qs="This will load the LoRA layers specified in <code>state_dict</code> into <code>transformer</code>.",en,q,Lo,on,Et,Os=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.unet</code> and | |
| <code>self.text_encoder</code>.`,tn,Ut,Ks="All kwargs are forwarded to <code>self.lora_state_dict</code>.",rn,Ft,ei=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is | |
| loaded.`,an,Vt,oi=`See <code>~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer</code> for more details on how the state | |
| dict is loaded into <code>self.transformer</code>.`,nn,K,bo,sn,Xt,ti="Return state dict for lora weights and the network alphas.",dn,De,ln,Te,vo,cn,Nt,ri="Save the LoRA parameters corresponding to the UNet and text encoder.",Nr,wo,Wr,T,$o,fn,Wt,ai=`Load LoRA layers into <a href="/docs/diffusers/pr_10725/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>, | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow"><code>CLIPTextModel</code></a>.`,pn,zt,ni='Specific to <a href="/docs/diffusers/pr_10725/en/api/pipelines/stable_diffusion/stable_diffusion_3#diffusers.StableDiffusion3Pipeline">StableDiffusion3Pipeline</a>.',_n,Se,Mo,mn,qt,si="This will load the LoRA layers specified in <code>state_dict</code> into <code>text_encoder</code>",un,Ce,yo,hn,Bt,ii="This will load the LoRA layers specified in <code>state_dict</code> into <code>transformer</code>.",gn,B,Do,xn,jt,di=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.transformer</code> and | |
| <code>self.text_encoder</code>.`,Ln,Gt,li="All kwargs are forwarded to <code>self.lora_state_dict</code>.",bn,Jt,ci=`See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is | |
| loaded.`,vn,Zt,fi=`See <code>~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer</code> for more details on how the state | |
| dict is loaded into <code>self.transformer</code>.`,wn,ee,To,$n,Yt,pi="Return state dict for lora weights and the network alphas.",Mn,ke,yn,Ae,So,Dn,Qt,_i="Save the LoRA parameters corresponding to the UNet and text encoder.",Tn,oe,Co,Sn,Ot,mi=`Reverses the effect of | |
| <a href="https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora" rel="nofollow"><code>pipe.fuse_lora()</code></a>.`,Cn,Re,kn,te,ko,An,Kt,ui="Unloads the LoRA parameters.",Rn,Pe,zr,Ao,qr,P,Ro,Pn,er,hi='Load LoRA layers into <a href="/docs/diffusers/pr_10725/en/api/models/cogvideox_transformer3d#diffusers.CogVideoXTransformer3DModel">CogVideoXTransformer3DModel</a>. Specific to <a href="/docs/diffusers/pr_10725/en/api/pipelines/cogvideox#diffusers.CogVideoXPipeline">CogVideoXPipeline</a>.',In,Ie,Po,Hn,or,gi="This will load the LoRA layers specified in <code>state_dict</code> into <code>transformer</code>.",En,He,Io,Un,tr,xi=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.transformer</code> and | |
| <code>self.text_encoder</code>. All kwargs are forwarded to <code>self.lora_state_dict</code>. See | |
| <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is loaded. | |
| See <code>~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer</code> for more details on how the state | |
| dict is loaded into <code>self.transformer</code>.`,Fn,re,Ho,Vn,rr,Li="Return state dict for lora weights and the network alphas.",Xn,Ee,Nn,Ue,Eo,Wn,ar,bi="Save the LoRA parameters corresponding to the UNet and text encoder.",zn,ae,Uo,qn,nr,vi=`Reverses the effect of | |
| <a href="https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora" rel="nofollow"><code>pipe.fuse_lora()</code></a>.`,Bn,Fe,Br,Fo,jr,I,Vo,jn,sr,wi='Load LoRA layers into <a href="/docs/diffusers/pr_10725/en/api/models/mochi_transformer3d#diffusers.MochiTransformer3DModel">MochiTransformer3DModel</a>. Specific to <a href="/docs/diffusers/pr_10725/en/api/pipelines/mochi#diffusers.MochiPipeline">MochiPipeline</a>.',Gn,Ve,Xo,Jn,ir,$i="This will load the LoRA layers specified in <code>state_dict</code> into <code>transformer</code>.",Zn,Xe,No,Yn,dr,Mi=`Load LoRA weights specified in <code>pretrained_model_name_or_path_or_dict</code> into <code>self.transformer</code> and | |
| <code>self.text_encoder</code>. All kwargs are forwarded to <code>self.lora_state_dict</code>. See | |
| <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a> for more details on how the state dict is loaded. | |
| See <code>~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer</code> for more details on how the state | |
| dict is loaded into <code>self.transformer</code>.`,Qn,ne,Wo,On,lr,yi="Return state dict for lora weights and the network alphas.",Kn,Ne,es,We,zo,os,cr,Di="Save the LoRA parameters corresponding to the UNet and text encoder.",ts,se,qo,rs,fr,Ti=`Reverses the effect of | |
| <a href="https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora" rel="nofollow"><code>pipe.fuse_lora()</code></a>.`,as,ze,Gr,Bo,Jr,fe,jo,ns,qe,Go,ss,pr,Si="This will load the LoRA layers specified in <code>state_dict</code> into <code>transformer</code>.",Zr,Jo,Yr,S,Zo,is,_r,Ci="Utility class for handling LoRAs.",ds,mr,Yo,ls,Z,Qo,cs,ur,ki="Fuses the LoRA parameters into the original parameters of the corresponding blocks.",fs,Be,ps,je,_s,ie,Oo,ms,hr,Ai="Gets the list of the current active adapters.",us,Ge,hs,Je,Ko,gs,gr,Ri="Gets the current list of all available adapters in the pipeline.",xs,Ze,et,Ls,xr,Pi=`Moves the LoRAs listed in <code>adapter_names</code> to a target device. Useful for offloading the LoRA to the CPU in case | |
| you want to load multiple adapters and free some GPU memory.`,bs,de,ot,vs,Lr,Ii=`Reverses the effect of | |
| <a href="https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora" rel="nofollow"><code>pipe.fuse_lora()</code></a>.`,ws,Ye,$s,le,tt,Ms,br,Hi="Unloads the LoRA parameters.",ys,Qe,Qr,rt,Or,Rr,Kr;return y=new me({props:{title:"LoRA",local:"lora",headingTag:"h1"}}),ue=new Y({props:{$$slots:{default:[zi]},$$scope:{ctx:D}}}),eo=new me({props:{title:"StableDiffusionLoraLoaderMixin",local:"diffusers.loaders.StableDiffusionLoraLoaderMixin",headingTag:"h2"}}),oo=new $({props:{name:"class diffusers.loaders.StableDiffusionLoraLoaderMixin",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L69"}}),to=new $({props:{name:"load_lora_into_text_encoder",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"text_encoder",val:""},{name:"prefix",val:" = None"},{name:"lora_scale",val:" = 1.0"},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The key should be prefixed with an | |
| additional <code>text_encoder</code> to distinguish between unet lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| The text encoder model to load the LoRA layers into.`,name:"text_encoder"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.prefix",description:`<strong>prefix</strong> (<code>str</code>) — | |
| Expected prefix of the <code>text_encoder</code> in the <code>state_dict</code>.`,name:"prefix"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>) — | |
| How much to scale the output of the lora linear layer before it is added with the output of the regular | |
| lora layer.`,name:"lora_scale"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L314"}}),ro=new $({props:{name:"load_lora_into_unet",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"unet",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet.unet",description:`<strong>unet</strong> (<code>UNet2DConditionModel</code>) — | |
| The UNet model to load the LoRA layers into.`,name:"unet"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L264"}}),ao=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L79"}}),no=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict.weight_name",description:`<strong>weight_name</strong> (<code>str</code>, <em>optional</em>, defaults to None) — | |
| Name of the serialized state dict file.`,name:"weight_name"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L149"}}),xe=new Y({props:{warning:!0,$$slots:{default:[qi]},$$scope:{ctx:D}}}),so=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"unet_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_lora_layers",val:": typing.Dict[str, torch.nn.modules.module.Module] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.unet_lora_layers",description:`<strong>unet_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>unet</code>.`,name:"unet_lora_layers"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.text_encoder_lora_layers",description:`<strong>text_encoder_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_lora_layers"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.StableDiffusionLoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L363"}}),io=new me({props:{title:"StableDiffusionXLLoraLoaderMixin",local:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin",headingTag:"h2"}}),lo=new $({props:{name:"class diffusers.loaders.StableDiffusionXLLoraLoaderMixin",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L481"}}),co=new $({props:{name:"load_lora_into_text_encoder",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"text_encoder",val:""},{name:"prefix",val:" = None"},{name:"lora_scale",val:" = 1.0"},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The key should be prefixed with an | |
| additional <code>text_encoder</code> to distinguish between unet lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| The text encoder model to load the LoRA layers into.`,name:"text_encoder"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.prefix",description:`<strong>prefix</strong> (<code>str</code>) — | |
| Expected prefix of the <code>text_encoder</code> in the <code>state_dict</code>.`,name:"prefix"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>) — | |
| How much to scale the output of the lora linear layer before it is added with the output of the regular | |
| lora layer.`,name:"lora_scale"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L754"}}),fo=new $({props:{name:"load_lora_into_unet",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"unet",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet.unet",description:`<strong>unet</strong> (<code>UNet2DConditionModel</code>) — | |
| The UNet model to load the LoRA layers into.`,name:"unet"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_into_unet.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L703"}}),po=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:": typing.Optional[str] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L492"}}),_o=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.lora_state_dict.weight_name",description:`<strong>weight_name</strong> (<code>str</code>, <em>optional</em>, defaults to None) — | |
| Name of the serialized state dict file.`,name:"weight_name"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L587"}}),we=new Y({props:{warning:!0,$$slots:{default:[Bi]},$$scope:{ctx:D}}}),mo=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"unet_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_2_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.unet_lora_layers",description:`<strong>unet_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>unet</code>.`,name:"unet_lora_layers"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.text_encoder_lora_layers",description:`<strong>text_encoder_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_lora_layers"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.text_encoder_2_lora_layers",description:`<strong>text_encoder_2_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder_2</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_2_lora_layers"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L804"}}),uo=new me({props:{title:"SD3LoraLoaderMixin",local:"diffusers.loaders.SD3LoraLoaderMixin",headingTag:"h2"}}),ho=new $({props:{name:"class diffusers.loaders.SD3LoraLoaderMixin",anchor:"diffusers.loaders.SD3LoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L930"}}),go=new $({props:{name:"load_lora_into_text_encoder",anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"text_encoder",val:""},{name:"prefix",val:" = None"},{name:"lora_scale",val:" = 1.0"},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The key should be prefixed with an | |
| additional <code>text_encoder</code> to distinguish between unet lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| The text encoder model to load the LoRA layers into.`,name:"text_encoder"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.prefix",description:`<strong>prefix</strong> (<code>str</code>) — | |
| Expected prefix of the <code>text_encoder</code> in the <code>state_dict</code>.`,name:"prefix"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>) — | |
| How much to scale the output of the lora linear layer before it is added with the output of the regular | |
| lora layer.`,name:"lora_scale"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_text_encoder.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1161"}}),xo=new $({props:{name:"load_lora_into_transformer",anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_transformer",parameters:[{name:"state_dict",val:""},{name:"transformer",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_transformer.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_transformer.transformer",description:`<strong>transformer</strong> (<code>SD3Transformer2DModel</code>) — | |
| The Transformer model to load the LoRA layers into.`,name:"transformer"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_transformer.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_into_transformer.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1125"}}),Lo=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1040"}}),bo=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L943"}}),De=new Y({props:{warning:!0,$$slots:{default:[ji]},$$scope:{ctx:D}}}),vo=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"transformer_lora_layers",val:": typing.Dict[str, torch.nn.modules.module.Module] = None"},{name:"text_encoder_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_2_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.transformer_lora_layers",description:`<strong>transformer_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>transformer</code>.`,name:"transformer_lora_layers"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.text_encoder_lora_layers",description:`<strong>text_encoder_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_lora_layers"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.text_encoder_2_lora_layers",description:`<strong>text_encoder_2_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder_2</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_2_lora_layers"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.SD3LoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1211"}}),wo=new me({props:{title:"FluxLoraLoaderMixin",local:"diffusers.loaders.FluxLoraLoaderMixin",headingTag:"h2"}}),$o=new $({props:{name:"class diffusers.loaders.FluxLoraLoaderMixin",anchor:"diffusers.loaders.FluxLoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1338"}}),Mo=new $({props:{name:"load_lora_into_text_encoder",anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"text_encoder",val:""},{name:"prefix",val:" = None"},{name:"lora_scale",val:" = 1.0"},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The key should be prefixed with an | |
| additional <code>text_encoder</code> to distinguish between unet lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| The text encoder model to load the LoRA layers into.`,name:"text_encoder"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.prefix",description:`<strong>prefix</strong> (<code>str</code>) — | |
| Expected prefix of the <code>text_encoder</code> in the <code>state_dict</code>.`,name:"prefix"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>) — | |
| How much to scale the output of the lora linear layer before it is added with the output of the regular | |
| lora layer.`,name:"lora_scale"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_text_encoder.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1691"}}),yo=new $({props:{name:"load_lora_into_transformer",anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"transformer",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer.transformer",description:`<strong>transformer</strong> (<code>FluxTransformer2DModel</code>) — | |
| The Transformer model to load the LoRA layers into.`,name:"transformer"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_into_transformer.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1594"}}),Do=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| \`Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1485"}}),To=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"return_alphas",val:": bool = False"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1351"}}),ke=new Y({props:{warning:!0,$$slots:{default:[Gi]},$$scope:{ctx:D}}}),So=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"transformer_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"text_encoder_lora_layers",val:": typing.Dict[str, torch.nn.modules.module.Module] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.transformer_lora_layers",description:`<strong>transformer_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>transformer</code>.`,name:"transformer_lora_layers"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.text_encoder_lora_layers",description:`<strong>text_encoder_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>text_encoder</code>. Must explicitly pass the text | |
| encoder LoRA state dict because it comes from 🤗 Transformers.`,name:"text_encoder_lora_layers"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.FluxLoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1741"}}),Co=new $({props:{name:"unfuse_lora",anchor:"diffusers.loaders.FluxLoraLoaderMixin.unfuse_lora",parameters:[{name:"components",val:": typing.List[str] = ['transformer', 'text_encoder']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.unfuse_lora.components",description:"<strong>components</strong> (<code>List[str]</code>) — List of LoRA-injectable components to unfuse LoRA from.",name:"components"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1852"}}),Re=new Y({props:{warning:!0,$$slots:{default:[Ji]},$$scope:{ctx:D}}}),ko=new $({props:{name:"unload_lora_weights",anchor:"diffusers.loaders.FluxLoraLoaderMixin.unload_lora_weights",parameters:[{name:"reset_to_overwritten_params",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.FluxLoraLoaderMixin.unload_lora_weights.reset_to_overwritten_params",description:`<strong>reset_to_overwritten_params</strong> (<code>bool</code>, defaults to <code>False</code>) — Whether to reset the LoRA-loaded modules | |
| to their original params. Refer to the <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux" rel="nofollow">Flux | |
| documentation</a> to learn more.`,name:"reset_to_overwritten_params"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L1873"}}),Pe=new ma({props:{anchor:"diffusers.loaders.FluxLoraLoaderMixin.unload_lora_weights.example",$$slots:{default:[Zi]},$$scope:{ctx:D}}}),Ao=new me({props:{title:"CogVideoXLoraLoaderMixin",local:"diffusers.loaders.CogVideoXLoraLoaderMixin",headingTag:"h2"}}),Ro=new $({props:{name:"class diffusers.loaders.CogVideoXLoraLoaderMixin",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2291"}}),Po=new $({props:{name:"load_lora_into_transformer",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_into_transformer",parameters:[{name:"state_dict",val:""},{name:"transformer",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_into_transformer.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_into_transformer.transformer",description:`<strong>transformer</strong> (<code>CogVideoXTransformer3DModel</code>) — | |
| The Transformer model to load the LoRA layers into.`,name:"transformer"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_into_transformer.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_into_transformer.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2447"}}),Io=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2397"}}),Ho=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2299"}}),Ee=new Y({props:{warning:!0,$$slots:{default:[Yi]},$$scope:{ctx:D}}}),Eo=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"transformer_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights.transformer_lora_layers",description:`<strong>transformer_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>transformer</code>.`,name:"transformer_lora_layers"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2484"}}),Uo=new $({props:{name:"unfuse_lora",anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.unfuse_lora",parameters:[{name:"components",val:": typing.List[str] = ['transformer']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.unfuse_lora.components",description:"<strong>components</strong> (<code>List[str]</code>) — List of LoRA-injectable components to unfuse LoRA from.",name:"components"},{anchor:"diffusers.loaders.CogVideoXLoraLoaderMixin.unfuse_lora.unfuse_transformer",description:"<strong>unfuse_transformer</strong> (<code>bool</code>, defaults to <code>True</code>) — Whether to unfuse the UNet LoRA parameters.",name:"unfuse_transformer"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2575"}}),Fe=new Y({props:{warning:!0,$$slots:{default:[Qi]},$$scope:{ctx:D}}}),Fo=new me({props:{title:"Mochi1LoraLoaderMixin",local:"diffusers.loaders.Mochi1LoraLoaderMixin",headingTag:"h2"}}),Vo=new $({props:{name:"class diffusers.loaders.Mochi1LoraLoaderMixin",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2593"}}),Xo=new $({props:{name:"load_lora_into_transformer",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_into_transformer",parameters:[{name:"state_dict",val:""},{name:"transformer",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_into_transformer.state_dict",description:`<strong>state_dict</strong> (<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_into_transformer.transformer",description:`<strong>transformer</strong> (<code>MochiTransformer3DModel</code>) — | |
| The Transformer model to load the LoRA layers into.`,name:"transformer"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_into_transformer.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_into_transformer.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2750"}}),No=new $({props:{name:"load_lora_weights",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_weights",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"adapter_name",val:" = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_weights.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_weights.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.load_lora_weights.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| See <a href="/docs/diffusers/pr_10725/en/api/loaders/lora#diffusers.loaders.StableDiffusionLoraLoaderMixin.lora_state_dict">lora_state_dict()</a>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2700"}}),Wo=new $({props:{name:"lora_state_dict",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict",parameters:[{name:"pretrained_model_name_or_path_or_dict",val:": typing.Union[str, typing.Dict[str, torch.Tensor]]"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.pretrained_model_name_or_path_or_dict",description:`<strong>pretrained_model_name_or_path_or_dict</strong> (<code>str</code> or <code>os.PathLike</code> or <code>dict</code>) — | |
| Can be either:</p> | |
| <ul> | |
| <li>A string, the <em>model id</em> (for example <code>google/ddpm-celebahq-256</code>) of a pretrained model hosted on | |
| the Hub.</li> | |
| <li>A path to a <em>directory</em> (for example <code>./my_model_directory</code>) containing the model weights saved | |
| with <a href="/docs/diffusers/pr_10725/en/api/models/overview#diffusers.ModelMixin.save_pretrained">ModelMixin.save_pretrained()</a>.</li> | |
| <li>A <a href="https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict" rel="nofollow">torch state | |
| dict</a>.</li> | |
| </ul>`,name:"pretrained_model_name_or_path_or_dict"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.cache_dir",description:`<strong>cache_dir</strong> (<code>Union[str, os.PathLike]</code>, <em>optional</em>) — | |
| Path to a directory where a downloaded pretrained model configuration is cached if the standard cache | |
| is not used.`,name:"cache_dir"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.force_download",description:`<strong>force_download</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
| cached versions if they exist.`,name:"force_download"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.proxies",description:`<strong>proxies</strong> (<code>Dict[str, str]</code>, <em>optional</em>) — | |
| A dictionary of proxy servers to use by protocol or endpoint, for example, <code>{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}</code>. The proxies are used on each request.`,name:"proxies"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.local_files_only",description:`<strong>local_files_only</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether to only load local model weights and configuration files or not. If set to <code>True</code>, the model | |
| won’t be downloaded from the Hub.`,name:"local_files_only"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.token",description:`<strong>token</strong> (<code>str</code> or <em>bool</em>, <em>optional</em>) — | |
| The token to use as HTTP bearer authorization for remote files. If <code>True</code>, the token generated from | |
| <code>diffusers-cli login</code> (stored in <code>~/.huggingface</code>) is used.`,name:"token"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.revision",description:`<strong>revision</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"main"</code>) — | |
| The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier | |
| allowed by Git.`,name:"revision"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.lora_state_dict.subfolder",description:`<strong>subfolder</strong> (<code>str</code>, <em>optional</em>, defaults to <code>""</code>) — | |
| The subfolder location of a model file within a larger model repository on the Hub or locally.`,name:"subfolder"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2601"}}),Ne=new Y({props:{warning:!0,$$slots:{default:[Oi]},$$scope:{ctx:D}}}),zo=new $({props:{name:"save_lora_weights",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights",parameters:[{name:"save_directory",val:": typing.Union[str, os.PathLike]"},{name:"transformer_lora_layers",val:": typing.Dict[str, typing.Union[torch.nn.modules.module.Module, torch.Tensor]] = None"},{name:"is_main_process",val:": bool = True"},{name:"weight_name",val:": str = None"},{name:"save_function",val:": typing.Callable = None"},{name:"safe_serialization",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights.save_directory",description:`<strong>save_directory</strong> (<code>str</code> or <code>os.PathLike</code>) — | |
| Directory to save LoRA parameters to. Will be created if it doesn’t exist.`,name:"save_directory"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights.transformer_lora_layers",description:`<strong>transformer_lora_layers</strong> (<code>Dict[str, torch.nn.Module]</code> or <code>Dict[str, torch.Tensor]</code>) — | |
| State dict of the LoRA layers corresponding to the <code>transformer</code>.`,name:"transformer_lora_layers"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights.is_main_process",description:`<strong>is_main_process</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether the process calling this is the main process or not. Useful during distributed training and you | |
| need to call this function on all processes. In this case, set <code>is_main_process=True</code> only on the main | |
| process to avoid race conditions.`,name:"is_main_process"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights.save_function",description:`<strong>save_function</strong> (<code>Callable</code>) — | |
| The function to use to save the state dictionary. Useful during distributed training when you need to | |
| replace <code>torch.save</code> with another method. Can be configured with the environment variable | |
| <code>DIFFUSERS_SAVE_MODE</code>.`,name:"save_function"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.save_lora_weights.safe_serialization",description:`<strong>safe_serialization</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether to save the model using <code>safetensors</code> or the traditional PyTorch way with <code>pickle</code>.`,name:"safe_serialization"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2787"}}),qo=new $({props:{name:"unfuse_lora",anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.unfuse_lora",parameters:[{name:"components",val:": typing.List[str] = ['transformer']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.unfuse_lora.components",description:"<strong>components</strong> (<code>List[str]</code>) — List of LoRA-injectable components to unfuse LoRA from.",name:"components"},{anchor:"diffusers.loaders.Mochi1LoraLoaderMixin.unfuse_lora.unfuse_transformer",description:"<strong>unfuse_transformer</strong> (<code>bool</code>, defaults to <code>True</code>) — Whether to unfuse the UNet LoRA parameters.",name:"unfuse_transformer"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2878"}}),ze=new Y({props:{warning:!0,$$slots:{default:[Ki]},$$scope:{ctx:D}}}),Bo=new me({props:{title:"AmusedLoraLoaderMixin",local:"diffusers.loaders.AmusedLoraLoaderMixin",headingTag:"h2"}}),jo=new $({props:{name:"class diffusers.loaders.AmusedLoraLoaderMixin",anchor:"diffusers.loaders.AmusedLoraLoaderMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2137"}}),Go=new $({props:{name:"load_lora_into_transformer",anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer",parameters:[{name:"state_dict",val:""},{name:"network_alphas",val:""},{name:"transformer",val:""},{name:"adapter_name",val:" = None"},{name:"_pipeline",val:" = None"},{name:"low_cpu_mem_usage",val:" = False"}],parametersDescription:[{anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer.state_dict",description:`<strong>state_dict</strong> 
(<code>dict</code>) — | |
| A standard state dict containing the lora layer parameters. The keys can either be indexed directly | |
| into the unet or prefixed with an additional <code>unet</code> which can be used to distinguish between text | |
| encoder lora layers.`,name:"state_dict"},{anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer.network_alphas",description:`<strong>network_alphas</strong> (<code>Dict[str, float]</code>) — | |
| The value of the network alpha used for stable learning and preventing underflow. This value has the | |
| same meaning as the <code>--network_alpha</code> option in the kohya-ss trainer script. Refer to <a href="https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning" rel="nofollow">this | |
| link</a>.`,name:"network_alphas"},{anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer.transformer",description:`<strong>transformer</strong> (<code>UVit2DModel</code>) — | |
| The Transformer model to load the LoRA layers into.`,name:"transformer"},{anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer.adapter_name",description:`<strong>adapter_name</strong> (<code>str</code>, <em>optional</em>) — | |
| Adapter name to be used for referencing the loaded adapter model. If not specified, it will use | |
| <code>default_{i}</code> where i is the total number of adapters being loaded.`,name:"adapter_name"},{anchor:"diffusers.loaders.AmusedLoraLoaderMixin.load_lora_into_transformer.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>) — | |
| Speed up model loading by only loading the pretrained LoRA weights and not initializing the random | |
| weights.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_pipeline.py#L2142"}}),Jo=new me({props:{title:"LoraBaseMixin",local:"diffusers.loaders.lora_base.LoraBaseMixin",headingTag:"h2"}}),Zo=new $({props:{name:"class diffusers.loaders.lora_base.LoraBaseMixin",anchor:"diffusers.loaders.lora_base.LoraBaseMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L455"}}),Yo=new $({props:{name:"delete_adapters",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.delete_adapters",parameters:[{name:"adapter_names",val:": typing.Union[typing.List[str], str]"}],parametersDescription:[{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.delete_adapters.Deletes",description:`<strong>Deletes</strong> the LoRA layers of <code>adapter_name</code> for the unet and text-encoder(s). — | |
| adapter_names (<code>Union[List[str], str]</code>): | |
| The names of the adapter to delete. Can be a single string or a list of strings`,name:"Deletes"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L750"}}),Qo=new $({props:{name:"fuse_lora",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora",parameters:[{name:"components",val:": typing.List[str] = []"},{name:"lora_scale",val:": float = 1.0"},{name:"safe_fusing",val:": bool = False"},{name:"adapter_names",val:": typing.Optional[typing.List[str]] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora.components",description:"<strong>components</strong> — (<code>List[str]</code>): List of LoRA-injectable components to fuse the LoRAs into.",name:"components"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, defaults to 1.0) — | |
| Controls how much to influence the outputs with the LoRA parameters.`,name:"lora_scale"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora.safe_fusing",description:`<strong>safe_fusing</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.`,name:"safe_fusing"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora.adapter_names",description:`<strong>adapter_names</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.`,name:"adapter_names"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L522"}}),Be=new Y({props:{warning:!0,$$slots:{default:[ed]},$$scope:{ctx:D}}}),je=new ma({props:{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.fuse_lora.example",$$slots:{default:[od]},$$scope:{ctx:D}}}),Oo=new $({props:{name:"get_active_adapters",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.get_active_adapters",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L772"}}),Ge=new ma({props:{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.get_active_adapters.example",$$slots:{default:[td]},$$scope:{ctx:D}}}),Ko=new $({props:{name:"get_list_adapters",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.get_list_adapters",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L805"}}),et=new $({props:{name:"set_lora_device",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.set_lora_device",parameters:[{name:"adapter_names",val:": typing.List[str]"},{name:"device",val:": typing.Union[torch.device, str, int]"}],parametersDescription:[{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.set_lora_device.adapter_names",description:`<strong>adapter_names</strong> (<code>List[str]</code>) — | |
| List of adapters to send device to.`,name:"adapter_names"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.set_lora_device.device",description:`<strong>device</strong> (<code>Union[torch.device, str, int]</code>) — | |
| Device to send the adapters to. Can be either a torch device, a str or an integer.`,name:"device"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L827"}}),ot=new $({props:{name:"unfuse_lora",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unfuse_lora",parameters:[{name:"components",val:": typing.List[str] = []"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unfuse_lora.components",description:"<strong>components</strong> (<code>List[str]</code>) — List of LoRA-injectable components to unfuse LoRA from.",name:"components"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unfuse_lora.unfuse_unet",description:"<strong>unfuse_unet</strong> (<code>bool</code>, defaults to <code>True</code>) — Whether to unfuse the UNet LoRA parameters.",name:"unfuse_unet"},{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unfuse_lora.unfuse_text_encoder",description:`<strong>unfuse_text_encoder</strong> (<code>bool</code>, defaults to <code>True</code>) — | |
| Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn’t monkey-patched with the | |
| LoRA parameters then it won’t have any effect.`,name:"unfuse_text_encoder"}],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L603"}}),Ye=new Y({props:{warning:!0,$$slots:{default:[rd]},$$scope:{ctx:D}}}),tt=new $({props:{name:"unload_lora_weights",anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unload_lora_weights",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_10725/src/diffusers/loaders/lora_base.py#L499"}}),Qe=new ma({props:{anchor:"diffusers.loaders.lora_base.LoraBaseMixin.unload_lora_weights.example",$$slots:{default:[ad]},$$scope:{ctx:D}}}),rt=new Wi({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/loaders/lora.md"}}),{c(){a=n("meta"),b=t(),c=n("p"),l=t(),_(y.$$.fragment),i=t(),M=n("p"),M.innerHTML=Ds,Pr=t(),Ke=n("ul"),Ke.innerHTML=Ts,Ir=t(),_(ue.$$.fragment),Hr=t(),_(eo.$$.fragment),Er=t(),A=n("div"),_(oo.$$.fragment),ha=t(),pt=n("p"),pt.innerHTML=Ss,ga=t(),he=n("div"),_(to.$$.fragment),xa=t(),_t=n("p"),_t.innerHTML=Cs,La=t(),ge=n("div"),_(ro.$$.fragment),ba=t(),mt=n("p"),mt.innerHTML=ks,va=t(),V=n("div"),_(ao.$$.fragment),wa=t(),ut=n("p"),ut.innerHTML=As,$a=t(),ht=n("p"),ht.innerHTML=Rs,Ma=t(),gt=n("p"),gt.innerHTML=Ps,ya=t(),xt=n("p"),xt.innerHTML=Is,Da=t(),Lt=n("p"),Lt.innerHTML=Hs,Ta=t(),Q=n("div"),_(no.$$.fragment),Sa=t(),bt=n("p"),bt.textContent=Es,Ca=t(),_(xe.$$.fragment),ka=t(),Le=n("div"),_(so.$$.fragment),Aa=t(),vt=n("p"),vt.textContent=Us,Ur=t(),_(io.$$.fragment),Fr=t(),R=n("div"),_(lo.$$.fragment),Ra=t(),wt=n("p"),wt.innerHTML=Fs,Pa=t(),be=n("div"),_(co.$$.fragment),Ia=t(),$t=n("p"),$t.innerHTML=Vs,Ha=t(),ve=n("div"),_(fo.$$.fragment),Ea=t(),Mt=n("p"),Mt.innerHTML=Xs,Ua=t(),X=n("div"),_(po.$$.fragment),Fa=t(),yt=n("p"),yt.innerHTML=Ns,Va=t(),Dt=n("p"),Dt.innerHTML=Ws,Xa=t(),Tt=n("p"),Tt.innerHTML=zs,Na=t(),St=n("p"),St.innerHTML=qs,Wa=t(),Ct=n("p"),Ct.innerHTML=Bs,za=t(),O=n("div"),_(_o.$$.fragment),qa=t(),kt=n("p"),kt.textContent=js,
Ba=t(),_(we.$$.fragment),ja=t(),$e=n("div"),_(mo.$$.fragment),Ga=t(),At=n("p"),At.textContent=Gs,Vr=t(),_(uo.$$.fragment),Xr=t(),C=n("div"),_(ho.$$.fragment),Ja=t(),Rt=n("p"),Rt.innerHTML=Js,Za=t(),Pt=n("p"),Pt.innerHTML=Zs,Ya=t(),Me=n("div"),_(go.$$.fragment),Qa=t(),It=n("p"),It.innerHTML=Ys,Oa=t(),ye=n("div"),_(xo.$$.fragment),Ka=t(),Ht=n("p"),Ht.innerHTML=Qs,en=t(),q=n("div"),_(Lo.$$.fragment),on=t(),Et=n("p"),Et.innerHTML=Os,tn=t(),Ut=n("p"),Ut.innerHTML=Ks,rn=t(),Ft=n("p"),Ft.innerHTML=ei,an=t(),Vt=n("p"),Vt.innerHTML=oi,nn=t(),K=n("div"),_(bo.$$.fragment),sn=t(),Xt=n("p"),Xt.textContent=ti,dn=t(),_(De.$$.fragment),ln=t(),Te=n("div"),_(vo.$$.fragment),cn=t(),Nt=n("p"),Nt.textContent=ri,Nr=t(),_(wo.$$.fragment),Wr=t(),T=n("div"),_($o.$$.fragment),fn=t(),Wt=n("p"),Wt.innerHTML=ai,pn=t(),zt=n("p"),zt.innerHTML=ni,_n=t(),Se=n("div"),_(Mo.$$.fragment),mn=t(),qt=n("p"),qt.innerHTML=si,un=t(),Ce=n("div"),_(yo.$$.fragment),hn=t(),Bt=n("p"),Bt.innerHTML=ii,gn=t(),B=n("div"),_(Do.$$.fragment),xn=t(),jt=n("p"),jt.innerHTML=di,Ln=t(),Gt=n("p"),Gt.innerHTML=li,bn=t(),Jt=n("p"),Jt.innerHTML=ci,vn=t(),Zt=n("p"),Zt.innerHTML=fi,wn=t(),ee=n("div"),_(To.$$.fragment),$n=t(),Yt=n("p"),Yt.textContent=pi,Mn=t(),_(ke.$$.fragment),yn=t(),Ae=n("div"),_(So.$$.fragment),Dn=t(),Qt=n("p"),Qt.textContent=_i,Tn=t(),oe=n("div"),_(Co.$$.fragment),Sn=t(),Ot=n("p"),Ot.innerHTML=mi,Cn=t(),_(Re.$$.fragment),kn=t(),te=n("div"),_(ko.$$.fragment),An=t(),Kt=n("p"),Kt.textContent=ui,Rn=t(),_(Pe.$$.fragment),zr=t(),_(Ao.$$.fragment),qr=t(),P=n("div"),_(Ro.$$.fragment),Pn=t(),er=n("p"),er.innerHTML=hi,In=t(),Ie=n("div"),_(Po.$$.fragment),Hn=t(),or=n("p"),or.innerHTML=gi,En=t(),He=n("div"),_(Io.$$.fragment),Un=t(),tr=n("p"),tr.innerHTML=xi,Fn=t(),re=n("div"),_(Ho.$$.fragment),Vn=t(),rr=n("p"),rr.textContent=Li,Xn=t(),_(Ee.$$.fragment),Nn=t(),Ue=n("div"),_(Eo.$$.fragment),Wn=t(),ar=n("p"),ar.textContent=bi,zn=t(),ae=n("div"),_(Uo.$$.fragment),qn=t(),nr=n("p"),nr.innerHTML=vi,Bn=t(),_(Fe.$$.fragment),Br=t()
,_(Fo.$$.fragment),jr=t(),I=n("div"),_(Vo.$$.fragment),jn=t(),sr=n("p"),sr.innerHTML=wi,Gn=t(),Ve=n("div"),_(Xo.$$.fragment),Jn=t(),ir=n("p"),ir.innerHTML=$i,Zn=t(),Xe=n("div"),_(No.$$.fragment),Yn=t(),dr=n("p"),dr.innerHTML=Mi,Qn=t(),ne=n("div"),_(Wo.$$.fragment),On=t(),lr=n("p"),lr.textContent=yi,Kn=t(),_(Ne.$$.fragment),es=t(),We=n("div"),_(zo.$$.fragment),os=t(),cr=n("p"),cr.textContent=Di,ts=t(),se=n("div"),_(qo.$$.fragment),rs=t(),fr=n("p"),fr.innerHTML=Ti,as=t(),_(ze.$$.fragment),Gr=t(),_(Bo.$$.fragment),Jr=t(),fe=n("div"),_(jo.$$.fragment),ns=t(),qe=n("div"),_(Go.$$.fragment),ss=t(),pr=n("p"),pr.innerHTML=Si,Zr=t(),_(Jo.$$.fragment),Yr=t(),S=n("div"),_(Zo.$$.fragment),is=t(),_r=n("p"),_r.textContent=Ci,ds=t(),mr=n("div"),_(Yo.$$.fragment),ls=t(),Z=n("div"),_(Qo.$$.fragment),cs=t(),ur=n("p"),ur.textContent=ki,fs=t(),_(Be.$$.fragment),ps=t(),_(je.$$.fragment),_s=t(),ie=n("div"),_(Oo.$$.fragment),ms=t(),hr=n("p"),hr.textContent=Ai,us=t(),_(Ge.$$.fragment),hs=t(),Je=n("div"),_(Ko.$$.fragment),gs=t(),gr=n("p"),gr.textContent=Ri,xs=t(),Ze=n("div"),_(et.$$.fragment),Ls=t(),xr=n("p"),xr.innerHTML=Pi,bs=t(),de=n("div"),_(ot.$$.fragment),vs=t(),Lr=n("p"),Lr.innerHTML=Ii,ws=t(),_(Ye.$$.fragment),$s=t(),le=n("div"),_(tt.$$.fragment),Ms=t(),br=n("p"),br.textContent=Hi,ys=t(),_(Qe.$$.fragment),Qr=t(),_(rt.$$.fragment),Or=t(),Rr=n("p"),this.h()},l(e){const p=Ni("svelte-u9bgzb",document.head);a=s(p,"META",{name:!0,content:!0}),p.forEach(d),b=r(e),c=s(e,"P",{}),v(c).forEach(d),l=r(e),m(y.$$.fragment,e),i=r(e),M=s(e,"P",{"data-svelte-h":!0}),f(M)!=="svelte-12kvmzd"&&(M.innerHTML=Ds),Pr=r(e),Ke=s(e,"UL",{"data-svelte-h":!0}),f(Ke)!=="svelte-jes06s"&&(Ke.innerHTML=Ts),Ir=r(e),m(ue.$$.fragment,e),Hr=r(e),m(eo.$$.fragment,e),Er=r(e),A=s(e,"DIV",{class:!0});var U=v(A);m(oo.$$.fragment,U),ha=r(U),pt=s(U,"P",{"data-svelte-h":!0}),f(pt)!=="svelte-17skqt4"&&(pt.innerHTML=Ss),ga=r(U),he=s(U,"DIV",{class:!0});var 
at=v(he);m(to.$$.fragment,at),xa=r(at),_t=s(at,"P",{"data-svelte-h":!0}),f(_t)!=="svelte-1062ci4"&&(_t.innerHTML=Cs),at.forEach(d),La=r(U),ge=s(U,"DIV",{class:!0});var nt=v(ge);m(ro.$$.fragment,nt),ba=r(nt),mt=s(nt,"P",{"data-svelte-h":!0}),f(mt)!=="svelte-u3q4so"&&(mt.innerHTML=ks),nt.forEach(d),va=r(U),V=s(U,"DIV",{class:!0});var W=v(V);m(ao.$$.fragment,W),wa=r(W),ut=s(W,"P",{"data-svelte-h":!0}),f(ut)!=="svelte-vs7s0z"&&(ut.innerHTML=As),$a=r(W),ht=s(W,"P",{"data-svelte-h":!0}),f(ht)!=="svelte-15b960v"&&(ht.innerHTML=Rs),Ma=r(W),gt=s(W,"P",{"data-svelte-h":!0}),f(gt)!=="svelte-1tazhb9"&&(gt.innerHTML=Ps),ya=r(W),xt=s(W,"P",{"data-svelte-h":!0}),f(xt)!=="svelte-1boeg66"&&(xt.innerHTML=Is),Da=r(W),Lt=s(W,"P",{"data-svelte-h":!0}),f(Lt)!=="svelte-19p3dv0"&&(Lt.innerHTML=Hs),W.forEach(d),Ta=r(U),Q=s(U,"DIV",{class:!0});var pe=v(Q);m(no.$$.fragment,pe),Sa=r(pe),bt=s(pe,"P",{"data-svelte-h":!0}),f(bt)!=="svelte-flusvq"&&(bt.textContent=Es),Ca=r(pe),m(xe.$$.fragment,pe),pe.forEach(d),ka=r(U),Le=s(U,"DIV",{class:!0});var st=v(Le);m(so.$$.fragment,st),Aa=r(st),vt=s(st,"P",{"data-svelte-h":!0}),f(vt)!=="svelte-1ufq5ot"&&(vt.textContent=Us),st.forEach(d),U.forEach(d),Ur=r(e),m(io.$$.fragment,e),Fr=r(e),R=s(e,"DIV",{class:!0});var F=v(R);m(lo.$$.fragment,F),Ra=r(F),wt=s(F,"P",{"data-svelte-h":!0}),f(wt)!=="svelte-1gg62p9"&&(wt.innerHTML=Fs),Pa=r(F),be=s(F,"DIV",{class:!0});var it=v(be);m(co.$$.fragment,it),Ia=r(it),$t=s(it,"P",{"data-svelte-h":!0}),f($t)!=="svelte-1062ci4"&&($t.innerHTML=Vs),it.forEach(d),Ha=r(F),ve=s(F,"DIV",{class:!0});var dt=v(ve);m(fo.$$.fragment,dt),Ea=r(dt),Mt=s(dt,"P",{"data-svelte-h":!0}),f(Mt)!=="svelte-u3q4so"&&(Mt.innerHTML=Xs),dt.forEach(d),Ua=r(F),X=s(F,"DIV",{class:!0});var 
z=v(X);m(po.$$.fragment,z),Fa=r(z),yt=s(z,"P",{"data-svelte-h":!0}),f(yt)!=="svelte-vs7s0z"&&(yt.innerHTML=Ns),Va=r(z),Dt=s(z,"P",{"data-svelte-h":!0}),f(Dt)!=="svelte-15b960v"&&(Dt.innerHTML=Ws),Xa=r(z),Tt=s(z,"P",{"data-svelte-h":!0}),f(Tt)!=="svelte-1tazhb9"&&(Tt.innerHTML=zs),Na=r(z),St=s(z,"P",{"data-svelte-h":!0}),f(St)!=="svelte-1boeg66"&&(St.innerHTML=qs),Wa=r(z),Ct=s(z,"P",{"data-svelte-h":!0}),f(Ct)!=="svelte-19p3dv0"&&(Ct.innerHTML=Bs),z.forEach(d),za=r(F),O=s(F,"DIV",{class:!0});var _e=v(O);m(_o.$$.fragment,_e),qa=r(_e),kt=s(_e,"P",{"data-svelte-h":!0}),f(kt)!=="svelte-flusvq"&&(kt.textContent=js),Ba=r(_e),m(we.$$.fragment,_e),_e.forEach(d),ja=r(F),$e=s(F,"DIV",{class:!0});var lt=v($e);m(mo.$$.fragment,lt),Ga=r(lt),At=s(lt,"P",{"data-svelte-h":!0}),f(At)!=="svelte-1ufq5ot"&&(At.textContent=Gs),lt.forEach(d),F.forEach(d),Vr=r(e),m(uo.$$.fragment,e),Xr=r(e),C=s(e,"DIV",{class:!0});var H=v(C);m(ho.$$.fragment,H),Ja=r(H),Rt=s(H,"P",{"data-svelte-h":!0}),f(Rt)!=="svelte-joqbj8"&&(Rt.innerHTML=Js),Za=r(H),Pt=s(H,"P",{"data-svelte-h":!0}),f(Pt)!=="svelte-1wbipxu"&&(Pt.innerHTML=Zs),Ya=r(H),Me=s(H,"DIV",{class:!0});var ct=v(Me);m(go.$$.fragment,ct),Qa=r(ct),It=s(ct,"P",{"data-svelte-h":!0}),f(It)!=="svelte-1062ci4"&&(It.innerHTML=Ys),ct.forEach(d),Oa=r(H),ye=s(H,"DIV",{class:!0});var ft=v(ye);m(xo.$$.fragment,ft),Ka=r(ft),Ht=s(ft,"P",{"data-svelte-h":!0}),f(Ht)!=="svelte-1lgbsz7"&&(Ht.innerHTML=Qs),ft.forEach(d),en=r(H),q=s(H,"DIV",{class:!0});var J=v(q);m(Lo.$$.fragment,J),on=r(J),Et=s(J,"P",{"data-svelte-h":!0}),f(Et)!=="svelte-vs7s0z"&&(Et.innerHTML=Os),tn=r(J),Ut=s(J,"P",{"data-svelte-h":!0}),f(Ut)!=="svelte-15b960v"&&(Ut.innerHTML=Ks),rn=r(J),Ft=s(J,"P",{"data-svelte-h":!0}),f(Ft)!=="svelte-1tazhb9"&&(Ft.innerHTML=ei),an=r(J),Vt=s(J,"P",{"data-svelte-h":!0}),f(Vt)!=="svelte-1ukghd4"&&(Vt.innerHTML=oi),J.forEach(d),nn=r(H),K=s(H,"DIV",{class:!0});var 
vr=v(K);m(bo.$$.fragment,vr),sn=r(vr),Xt=s(vr,"P",{"data-svelte-h":!0}),f(Xt)!=="svelte-flusvq"&&(Xt.textContent=ti),dn=r(vr),m(De.$$.fragment,vr),vr.forEach(d),ln=r(H),Te=s(H,"DIV",{class:!0});var ea=v(Te);m(vo.$$.fragment,ea),cn=r(ea),Nt=s(ea,"P",{"data-svelte-h":!0}),f(Nt)!=="svelte-1ufq5ot"&&(Nt.textContent=ri),ea.forEach(d),H.forEach(d),Nr=r(e),m(wo.$$.fragment,e),Wr=r(e),T=s(e,"DIV",{class:!0});var k=v(T);m($o.$$.fragment,k),fn=r(k),Wt=s(k,"P",{"data-svelte-h":!0}),f(Wt)!=="svelte-x2x6ex"&&(Wt.innerHTML=ai),pn=r(k),zt=s(k,"P",{"data-svelte-h":!0}),f(zt)!=="svelte-1wbipxu"&&(zt.innerHTML=ni),_n=r(k),Se=s(k,"DIV",{class:!0});var oa=v(Se);m(Mo.$$.fragment,oa),mn=r(oa),qt=s(oa,"P",{"data-svelte-h":!0}),f(qt)!=="svelte-1062ci4"&&(qt.innerHTML=si),oa.forEach(d),un=r(k),Ce=s(k,"DIV",{class:!0});var ta=v(Ce);m(yo.$$.fragment,ta),hn=r(ta),Bt=s(ta,"P",{"data-svelte-h":!0}),f(Bt)!=="svelte-1lgbsz7"&&(Bt.innerHTML=ii),ta.forEach(d),gn=r(k),B=s(k,"DIV",{class:!0});var ce=v(B);m(Do.$$.fragment,ce),xn=r(ce),jt=s(ce,"P",{"data-svelte-h":!0}),f(jt)!=="svelte-178gcly"&&(jt.innerHTML=di),Ln=r(ce),Gt=s(ce,"P",{"data-svelte-h":!0}),f(Gt)!=="svelte-15b960v"&&(Gt.innerHTML=li),bn=r(ce),Jt=s(ce,"P",{"data-svelte-h":!0}),f(Jt)!=="svelte-1tazhb9"&&(Jt.innerHTML=ci),vn=r(ce),Zt=s(ce,"P",{"data-svelte-h":!0}),f(Zt)!=="svelte-1ukghd4"&&(Zt.innerHTML=fi),ce.forEach(d),wn=r(k),ee=s(k,"DIV",{class:!0});var wr=v(ee);m(To.$$.fragment,wr),$n=r(wr),Yt=s(wr,"P",{"data-svelte-h":!0}),f(Yt)!=="svelte-flusvq"&&(Yt.textContent=pi),Mn=r(wr),m(ke.$$.fragment,wr),wr.forEach(d),yn=r(k),Ae=s(k,"DIV",{class:!0});var ra=v(Ae);m(So.$$.fragment,ra),Dn=r(ra),Qt=s(ra,"P",{"data-svelte-h":!0}),f(Qt)!=="svelte-1ufq5ot"&&(Qt.textContent=_i),ra.forEach(d),Tn=r(k),oe=s(k,"DIV",{class:!0});var $r=v(oe);m(Co.$$.fragment,$r),Sn=r($r),Ot=s($r,"P",{"data-svelte-h":!0}),f(Ot)!=="svelte-ioswce"&&(Ot.innerHTML=mi),Cn=r($r),m(Re.$$.fragment,$r),$r.forEach(d),kn=r(k),te=s(k,"DIV",{class:!0});var 
Mr=v(te);m(ko.$$.fragment,Mr),An=r(Mr),Kt=s(Mr,"P",{"data-svelte-h":!0}),f(Kt)!=="svelte-119cgd9"&&(Kt.textContent=ui),Rn=r(Mr),m(Pe.$$.fragment,Mr),Mr.forEach(d),k.forEach(d),zr=r(e),m(Ao.$$.fragment,e),qr=r(e),P=s(e,"DIV",{class:!0});var j=v(P);m(Ro.$$.fragment,j),Pn=r(j),er=s(j,"P",{"data-svelte-h":!0}),f(er)!=="svelte-9gd1zd"&&(er.innerHTML=hi),In=r(j),Ie=s(j,"DIV",{class:!0});var aa=v(Ie);m(Po.$$.fragment,aa),Hn=r(aa),or=s(aa,"P",{"data-svelte-h":!0}),f(or)!=="svelte-1lgbsz7"&&(or.innerHTML=gi),aa.forEach(d),En=r(j),He=s(j,"DIV",{class:!0});var na=v(He);m(Io.$$.fragment,na),Un=r(na),tr=s(na,"P",{"data-svelte-h":!0}),f(tr)!=="svelte-i68oio"&&(tr.innerHTML=xi),na.forEach(d),Fn=r(j),re=s(j,"DIV",{class:!0});var yr=v(re);m(Ho.$$.fragment,yr),Vn=r(yr),rr=s(yr,"P",{"data-svelte-h":!0}),f(rr)!=="svelte-flusvq"&&(rr.textContent=Li),Xn=r(yr),m(Ee.$$.fragment,yr),yr.forEach(d),Nn=r(j),Ue=s(j,"DIV",{class:!0});var sa=v(Ue);m(Eo.$$.fragment,sa),Wn=r(sa),ar=s(sa,"P",{"data-svelte-h":!0}),f(ar)!=="svelte-1ufq5ot"&&(ar.textContent=bi),sa.forEach(d),zn=r(j),ae=s(j,"DIV",{class:!0});var Dr=v(ae);m(Uo.$$.fragment,Dr),qn=r(Dr),nr=s(Dr,"P",{"data-svelte-h":!0}),f(nr)!=="svelte-ioswce"&&(nr.innerHTML=vi),Bn=r(Dr),m(Fe.$$.fragment,Dr),Dr.forEach(d),j.forEach(d),Br=r(e),m(Fo.$$.fragment,e),jr=r(e),I=s(e,"DIV",{class:!0});var G=v(I);m(Vo.$$.fragment,G),jn=r(G),sr=s(G,"P",{"data-svelte-h":!0}),f(sr)!=="svelte-evmoh1"&&(sr.innerHTML=wi),Gn=r(G),Ve=s(G,"DIV",{class:!0});var ia=v(Ve);m(Xo.$$.fragment,ia),Jn=r(ia),ir=s(ia,"P",{"data-svelte-h":!0}),f(ir)!=="svelte-1lgbsz7"&&(ir.innerHTML=$i),ia.forEach(d),Zn=r(G),Xe=s(G,"DIV",{class:!0});var da=v(Xe);m(No.$$.fragment,da),Yn=r(da),dr=s(da,"P",{"data-svelte-h":!0}),f(dr)!=="svelte-i68oio"&&(dr.innerHTML=Mi),da.forEach(d),Qn=r(G),ne=s(G,"DIV",{class:!0});var 
Tr=v(ne);m(Wo.$$.fragment,Tr),On=r(Tr),lr=s(Tr,"P",{"data-svelte-h":!0}),f(lr)!=="svelte-flusvq"&&(lr.textContent=yi),Kn=r(Tr),m(Ne.$$.fragment,Tr),Tr.forEach(d),es=r(G),We=s(G,"DIV",{class:!0});var la=v(We);m(zo.$$.fragment,la),os=r(la),cr=s(la,"P",{"data-svelte-h":!0}),f(cr)!=="svelte-1ufq5ot"&&(cr.textContent=Di),la.forEach(d),ts=r(G),se=s(G,"DIV",{class:!0});var Sr=v(se);m(qo.$$.fragment,Sr),rs=r(Sr),fr=s(Sr,"P",{"data-svelte-h":!0}),f(fr)!=="svelte-ioswce"&&(fr.innerHTML=Ti),as=r(Sr),m(ze.$$.fragment,Sr),Sr.forEach(d),G.forEach(d),Gr=r(e),m(Bo.$$.fragment,e),Jr=r(e),fe=s(e,"DIV",{class:!0});var ca=v(fe);m(jo.$$.fragment,ca),ns=r(ca),qe=s(ca,"DIV",{class:!0});var fa=v(qe);m(Go.$$.fragment,fa),ss=r(fa),pr=s(fa,"P",{"data-svelte-h":!0}),f(pr)!=="svelte-1lgbsz7"&&(pr.innerHTML=Si),fa.forEach(d),ca.forEach(d),Zr=r(e),m(Jo.$$.fragment,e),Yr=r(e),S=s(e,"DIV",{class:!0});var E=v(S);m(Zo.$$.fragment,E),is=r(E),_r=s(E,"P",{"data-svelte-h":!0}),f(_r)!=="svelte-1q4bbx"&&(_r.textContent=Ci),ds=r(E),mr=s(E,"DIV",{class:!0});var Ei=v(mr);m(Yo.$$.fragment,Ei),Ei.forEach(d),ls=r(E),Z=s(E,"DIV",{class:!0});var Oe=v(Z);m(Qo.$$.fragment,Oe),cs=r(Oe),ur=s(Oe,"P",{"data-svelte-h":!0}),f(ur)!=="svelte-1nr2dy0"&&(ur.textContent=ki),fs=r(Oe),m(Be.$$.fragment,Oe),ps=r(Oe),m(je.$$.fragment,Oe),Oe.forEach(d),_s=r(E),ie=s(E,"DIV",{class:!0});var Cr=v(ie);m(Oo.$$.fragment,Cr),ms=r(Cr),hr=s(Cr,"P",{"data-svelte-h":!0}),f(hr)!=="svelte-h0os0v"&&(hr.textContent=Ai),us=r(Cr),m(Ge.$$.fragment,Cr),Cr.forEach(d),hs=r(E),Je=s(E,"DIV",{class:!0});var pa=v(Je);m(Ko.$$.fragment,pa),gs=r(pa),gr=s(pa,"P",{"data-svelte-h":!0}),f(gr)!=="svelte-1825k9e"&&(gr.textContent=Ri),pa.forEach(d),xs=r(E),Ze=s(E,"DIV",{class:!0});var _a=v(Ze);m(et.$$.fragment,_a),Ls=r(_a),xr=s(_a,"P",{"data-svelte-h":!0}),f(xr)!=="svelte-rvubqa"&&(xr.innerHTML=Pi),_a.forEach(d),bs=r(E),de=s(E,"DIV",{class:!0});var 
kr=v(de);m(ot.$$.fragment,kr),vs=r(kr),Lr=s(kr,"P",{"data-svelte-h":!0}),f(Lr)!=="svelte-ioswce"&&(Lr.innerHTML=Ii),ws=r(kr),m(Ye.$$.fragment,kr),kr.forEach(d),$s=r(E),le=s(E,"DIV",{class:!0});var Ar=v(le);m(tt.$$.fragment,Ar),Ms=r(Ar),br=s(Ar,"P",{"data-svelte-h":!0}),f(br)!=="svelte-119cgd9"&&(br.textContent=Hi),ys=r(Ar),m(Qe.$$.fragment,Ar),Ar.forEach(d),E.forEach(d),Qr=r(e),m(rt.$$.fragment,e),Or=r(e),Rr=s(e,"P",{}),v(Rr).forEach(d),this.h()},h(){w(a,"name","hf:doc:metadata"),w(a,"content",sd),w(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w($e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),w(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(He,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8"),w(We,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(se,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(mr,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(Ze,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(de,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),w(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,p){o(document.head,a),L(e,b,p),L(e,c,p),L(e,l,p),u(y,e,p),L(e,i,p),L(e,M,p),L(e,Pr,p),L(e,Ke,p),L(e,Ir,p),u(ue,e,p),L(e,Hr,p),u(eo,e,p),L(e,Er,p),L(e,A,p),u(oo,A,null),o(A,ha),o(A,pt),o(A,ga),o(A,he),u(to,he,null),o(he,xa),o(he,_t),o(A,La),o(A,ge),u(ro,ge,null),o(ge,ba),o(ge,mt),o(A,va),o(A,V),u(ao,V,null),o(V,wa),o(V,ut),o(V,$a),o(V,ht),o(V,Ma),o(V,gt),o(V,ya),o(V,xt),o(V,Da),o(V,Lt),o(A,Ta),o(A,Q),u(no,Q,null),o(Q,Sa),o(Q,bt),o(Q,Ca),u(xe,Q,null),o(A,ka),o(A,Le),u(so,Le,null),o(Le,Aa),o(Le,vt),L(e,Ur,p),u(io,e,p),L(e,Fr,p),L(e,R,p),u(lo,R,null),o(R,Ra),o(R,wt),o(R,Pa),o(R,be),u(co,be,null),o(be,Ia),o(be,$t),o(R,Ha),o(R,ve),u(fo,ve,null),o(ve,Ea),o(ve,Mt),o(R,Ua),o(R,X),u(po,X,null),o(X,Fa),o(X,yt),o(X,Va),o(X,Dt),o(X,Xa),o(X,Tt),o(X,Na),o(X,St),o(X,Wa),o(X,Ct),o(R,za),o(R,O),u(_o,O,null),o(O,qa),o(O,kt),o(O,Ba),u(we,O,null),o(R,ja),o(R,$e),u(mo,$e,null),o($e,Ga),o($e,At),L(e,Vr,p),u(uo,e,p),L(e,Xr,p),L(e,C,p),u(ho,C,null),o(C,Ja),o(C,Rt),o(C,Za),o(C,Pt),o(C,Ya),o(C,Me),u(go,Me,null),o(Me,Qa),o(Me,It),o(C,Oa),o(C,ye),u(xo,ye,null),o(ye,Ka),o(ye,Ht),o(C,en),o(C,q),u(Lo,q,null),o(q,on),o(q,Et),o(q,tn),o(q,Ut),o(q,rn),o(q,Ft),o(q,an),o(q,Vt),o(C,nn),o(C,K),u(bo,K,null),o(K,sn),o(K,Xt),o(K,dn),u(De,K,null),o(C,ln),o(C,Te),u(vo,Te,null),o(Te,cn),o(Te,Nt),L(e,Nr,p),u(wo,e,p),L(e,Wr,p),L(e,T,p),u($o,T,null),o(T,fn),o(T,Wt),o(T,pn),o(T,zt),o(T,_n),o(T,Se),u(Mo,Se,null),o(Se,mn),o(Se,qt),o(T,un),o(T,Ce),u(yo,Ce,null),o(Ce,hn),o(Ce,Bt),o(T,gn),o(T,B),u(Do,B,null),o(B,xn),o(B,jt),o(B,Ln),o(B,Gt),o(B,bn),o(B,Jt),o(B,vn),o(B,Zt),o(T,wn),o(T,ee),u(To,ee,null),o(ee,$n),o(ee,Yt),o(ee,Mn),u(ke,ee,null),o(T,yn),o(T,Ae),u(So,Ae,null),o(Ae,Dn),o(Ae,Qt),o(T,Tn),o(T,oe),u(Co,oe,null),o(oe,Sn),o(oe,Ot),o(oe,Cn),u(Re,oe,null),o(T,kn),o(T,te),u(ko,te,null),o(te,An),o(te,Kt),o(te,Rn),u(Pe,te,null),L(e,zr,p),u(Ao,e,p),L(e,qr,p),L(e,P,p),u(Ro,P,null),o(P,Pn),o(P,er),o(P,In),o(P,Ie),u(Po,Ie,null),o(Ie,Hn),o(Ie,or),o(P,En),o(P,He),u(Io,He,null),o(He,Un),o(He,tr),o(P,Fn),o(P,re),u(Ho
,re,null),o(re,Vn),o(re,rr),o(re,Xn),u(Ee,re,null),o(P,Nn),o(P,Ue),u(Eo,Ue,null),o(Ue,Wn),o(Ue,ar),o(P,zn),o(P,ae),u(Uo,ae,null),o(ae,qn),o(ae,nr),o(ae,Bn),u(Fe,ae,null),L(e,Br,p),u(Fo,e,p),L(e,jr,p),L(e,I,p),u(Vo,I,null),o(I,jn),o(I,sr),o(I,Gn),o(I,Ve),u(Xo,Ve,null),o(Ve,Jn),o(Ve,ir),o(I,Zn),o(I,Xe),u(No,Xe,null),o(Xe,Yn),o(Xe,dr),o(I,Qn),o(I,ne),u(Wo,ne,null),o(ne,On),o(ne,lr),o(ne,Kn),u(Ne,ne,null),o(I,es),o(I,We),u(zo,We,null),o(We,os),o(We,cr),o(I,ts),o(I,se),u(qo,se,null),o(se,rs),o(se,fr),o(se,as),u(ze,se,null),L(e,Gr,p),u(Bo,e,p),L(e,Jr,p),L(e,fe,p),u(jo,fe,null),o(fe,ns),o(fe,qe),u(Go,qe,null),o(qe,ss),o(qe,pr),L(e,Zr,p),u(Jo,e,p),L(e,Yr,p),L(e,S,p),u(Zo,S,null),o(S,is),o(S,_r),o(S,ds),o(S,mr),u(Yo,mr,null),o(S,ls),o(S,Z),u(Qo,Z,null),o(Z,cs),o(Z,ur),o(Z,fs),u(Be,Z,null),o(Z,ps),u(je,Z,null),o(S,_s),o(S,ie),u(Oo,ie,null),o(ie,ms),o(ie,hr),o(ie,us),u(Ge,ie,null),o(S,hs),o(S,Je),u(Ko,Je,null),o(Je,gs),o(Je,gr),o(S,xs),o(S,Ze),u(et,Ze,null),o(Ze,Ls),o(Ze,xr),o(S,bs),o(S,de),u(ot,de,null),o(de,vs),o(de,Lr),o(de,ws),u(Ye,de,null),o(S,$s),o(S,le),u(tt,le,null),o(le,Ms),o(le,br),o(le,ys),u(Qe,le,null),L(e,Qr,p),u(rt,e,p),L(e,Or,p),L(e,Rr,p),Kr=!0},p(e,[p]){const U={};p&2&&(U.$$scope={dirty:p,ctx:e}),ue.$set(U);const at={};p&2&&(at.$$scope={dirty:p,ctx:e}),xe.$set(at);const nt={};p&2&&(nt.$$scope={dirty:p,ctx:e}),we.$set(nt);const W={};p&2&&(W.$$scope={dirty:p,ctx:e}),De.$set(W);const pe={};p&2&&(pe.$$scope={dirty:p,ctx:e}),ke.$set(pe);const st={};p&2&&(st.$$scope={dirty:p,ctx:e}),Re.$set(st);const F={};p&2&&(F.$$scope={dirty:p,ctx:e}),Pe.$set(F);const it={};p&2&&(it.$$scope={dirty:p,ctx:e}),Ee.$set(it);const dt={};p&2&&(dt.$$scope={dirty:p,ctx:e}),Fe.$set(dt);const z={};p&2&&(z.$$scope={dirty:p,ctx:e}),Ne.$set(z);const _e={};p&2&&(_e.$$scope={dirty:p,ctx:e}),ze.$set(_e);const lt={};p&2&&(lt.$$scope={dirty:p,ctx:e}),Be.$set(lt);const H={};p&2&&(H.$$scope={dirty:p,ctx:e}),je.$set(H);const ct={};p&2&&(ct.$$scope={dirty:p,ctx:e}),Ge.$set(ct);const 
ft={};p&2&&(ft.$$scope={dirty:p,ctx:e}),Ye.$set(ft);const J={};p&2&&(J.$$scope={dirty:p,ctx:e}),Qe.$set(J)},i(e){Kr||(h(y.$$.fragment,e),h(ue.$$.fragment,e),h(eo.$$.fragment,e),h(oo.$$.fragment,e),h(to.$$.fragment,e),h(ro.$$.fragment,e),h(ao.$$.fragment,e),h(no.$$.fragment,e),h(xe.$$.fragment,e),h(so.$$.fragment,e),h(io.$$.fragment,e),h(lo.$$.fragment,e),h(co.$$.fragment,e),h(fo.$$.fragment,e),h(po.$$.fragment,e),h(_o.$$.fragment,e),h(we.$$.fragment,e),h(mo.$$.fragment,e),h(uo.$$.fragment,e),h(ho.$$.fragment,e),h(go.$$.fragment,e),h(xo.$$.fragment,e),h(Lo.$$.fragment,e),h(bo.$$.fragment,e),h(De.$$.fragment,e),h(vo.$$.fragment,e),h(wo.$$.fragment,e),h($o.$$.fragment,e),h(Mo.$$.fragment,e),h(yo.$$.fragment,e),h(Do.$$.fragment,e),h(To.$$.fragment,e),h(ke.$$.fragment,e),h(So.$$.fragment,e),h(Co.$$.fragment,e),h(Re.$$.fragment,e),h(ko.$$.fragment,e),h(Pe.$$.fragment,e),h(Ao.$$.fragment,e),h(Ro.$$.fragment,e),h(Po.$$.fragment,e),h(Io.$$.fragment,e),h(Ho.$$.fragment,e),h(Ee.$$.fragment,e),h(Eo.$$.fragment,e),h(Uo.$$.fragment,e),h(Fe.$$.fragment,e),h(Fo.$$.fragment,e),h(Vo.$$.fragment,e),h(Xo.$$.fragment,e),h(No.$$.fragment,e),h(Wo.$$.fragment,e),h(Ne.$$.fragment,e),h(zo.$$.fragment,e),h(qo.$$.fragment,e),h(ze.$$.fragment,e),h(Bo.$$.fragment,e),h(jo.$$.fragment,e),h(Go.$$.fragment,e),h(Jo.$$.fragment,e),h(Zo.$$.fragment,e),h(Yo.$$.fragment,e),h(Qo.$$.fragment,e),h(Be.$$.fragment,e),h(je.$$.fragment,e),h(Oo.$$.fragment,e),h(Ge.$$.fragment,e),h(Ko.$$.fragment,e),h(et.$$.fragment,e),h(ot.$$.fragment,e),h(Ye.$$.fragment,e),h(tt.$$.fragment,e),h(Qe.$$.fragment,e),h(rt.$$.fragment,e),Kr=!0)},o(e){g(y.$$.fragment,e),g(ue.$$.fragment,e),g(eo.$$.fragment,e),g(oo.$$.fragment,e),g(to.$$.fragment,e),g(ro.$$.fragment,e),g(ao.$$.fragment,e),g(no.$$.fragment,e),g(xe.$$.fragment,e),g(so.$$.fragment,e),g(io.$$.fragment,e),g(lo.$$.fragment,e),g(co.$$.fragment,e),g(fo.$$.fragment,e),g(po.$$.fragment,e),g(_o.$$.fragment,e),g(we.$$.fragment,e),g(mo.$$.fragment,e),g(uo.$$.fragment,e),g(ho.$$.fra
gment,e),g(go.$$.fragment,e),g(xo.$$.fragment,e),g(Lo.$$.fragment,e),g(bo.$$.fragment,e),g(De.$$.fragment,e),g(vo.$$.fragment,e),g(wo.$$.fragment,e),g($o.$$.fragment,e),g(Mo.$$.fragment,e),g(yo.$$.fragment,e),g(Do.$$.fragment,e),g(To.$$.fragment,e),g(ke.$$.fragment,e),g(So.$$.fragment,e),g(Co.$$.fragment,e),g(Re.$$.fragment,e),g(ko.$$.fragment,e),g(Pe.$$.fragment,e),g(Ao.$$.fragment,e),g(Ro.$$.fragment,e),g(Po.$$.fragment,e),g(Io.$$.fragment,e),g(Ho.$$.fragment,e),g(Ee.$$.fragment,e),g(Eo.$$.fragment,e),g(Uo.$$.fragment,e),g(Fe.$$.fragment,e),g(Fo.$$.fragment,e),g(Vo.$$.fragment,e),g(Xo.$$.fragment,e),g(No.$$.fragment,e),g(Wo.$$.fragment,e),g(Ne.$$.fragment,e),g(zo.$$.fragment,e),g(qo.$$.fragment,e),g(ze.$$.fragment,e),g(Bo.$$.fragment,e),g(jo.$$.fragment,e),g(Go.$$.fragment,e),g(Jo.$$.fragment,e),g(Zo.$$.fragment,e),g(Yo.$$.fragment,e),g(Qo.$$.fragment,e),g(Be.$$.fragment,e),g(je.$$.fragment,e),g(Oo.$$.fragment,e),g(Ge.$$.fragment,e),g(Ko.$$.fragment,e),g(et.$$.fragment,e),g(ot.$$.fragment,e),g(Ye.$$.fragment,e),g(tt.$$.fragment,e),g(Qe.$$.fragment,e),g(rt.$$.fragment,e),Kr=!1},d(e){e&&(d(b),d(c),d(l),d(i),d(M),d(Pr),d(Ke),d(Ir),d(Hr),d(Er),d(A),d(Ur),d(Fr),d(R),d(Vr),d(Xr),d(C),d(Nr),d(Wr),d(T),d(zr),d(qr),d(P),d(Br),d(jr),d(I),d(Gr),d(Jr),d(fe),d(Zr),d(Yr),d(S),d(Qr),d(Or),d(Rr)),d(a),x(y,e),x(ue,e),x(eo,e),x(oo),x(to),x(ro),x(ao),x(no),x(xe),x(so),x(io,e),x(lo),x(co),x(fo),x(po),x(_o),x(we),x(mo),x(uo,e),x(ho),x(go),x(xo),x(Lo),x(bo),x(De),x(vo),x(wo,e),x($o),x(Mo),x(yo),x(Do),x(To),x(ke),x(So),x(Co),x(Re),x(ko),x(Pe),x(Ao,e),x(Ro),x(Po),x(Io),x(Ho),x(Ee),x(Eo),x(Uo),x(Fe),x(Fo,e),x(Vo),x(Xo),x(No),x(Wo),x(Ne),x(zo),x(qo),x(ze),x(Bo,e),x(jo),x(Go),x(Jo,e),x(Zo),x(Yo),x(Qo),x(Be),x(je),x(Oo),x(Ge),x(Ko),x(et),x(ot),x(Ye),x(tt),x(Qe),x(rt,e)}}}const 
// Serialized table-of-contents metadata for this doc page ("LoRA" loaders).
// The component's mount code writes it into a <meta name="hf:doc:metadata"> tag
// in document.head (see the h()/m() hooks earlier in this bundle).
// NOTE: the `const` keyword for this binding sits at the end of the preceding
// minified line.
sd='{"title":"LoRA","local":"lora","sections":[{"title":"StableDiffusionLoraLoaderMixin","local":"diffusers.loaders.StableDiffusionLoraLoaderMixin","sections":[],"depth":2},{"title":"StableDiffusionXLLoraLoaderMixin","local":"diffusers.loaders.StableDiffusionXLLoraLoaderMixin","sections":[],"depth":2},{"title":"SD3LoraLoaderMixin","local":"diffusers.loaders.SD3LoraLoaderMixin","sections":[],"depth":2},{"title":"FluxLoraLoaderMixin","local":"diffusers.loaders.FluxLoraLoaderMixin","sections":[],"depth":2},{"title":"CogVideoXLoraLoaderMixin","local":"diffusers.loaders.CogVideoXLoraLoaderMixin","sections":[],"depth":2},{"title":"Mochi1LoraLoaderMixin","local":"diffusers.loaders.Mochi1LoraLoaderMixin","sections":[],"depth":2},{"title":"AmusedLoraLoaderMixin","local":"diffusers.loaders.AmusedLoraLoaderMixin","sections":[],"depth":2},{"title":"LoraBaseMixin","local":"diffusers.loaders.lora_base.LoraBaseMixin","sections":[],"depth":2}],"depth":1}';

// Component instance function. Registers a callback (via `Fi`, imported from
// the scheduler chunk — presumably onMount) that reads the "fw" query
// parameter; the comma operator then makes the function return an empty
// context array `[]`.
// NOTE(review): the value of .get("fw") is discarded — looks like a leftover
// framework-switcher hook from the doc-builder template; confirm upstream.
function id(D){return Fi(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}

// Compiled Svelte page component: `Xi` (init) wires together the instance
// function `id`, the create_fragment `nd` (defined earlier in this bundle,
// outside this view), and the equality helper `Ui` from the scheduler chunk.
class ud extends Vi{constructor(a){super(),Xi(this,a,id,nd,Ui,{})}}

// Route-level export consumed by the SvelteKit-style router.
export{ud as component};
Xet Storage Details
- Size: 144 kB
- Xet hash: 318e4646aabf2aa6fb1dcaa74f43bb60944cc33576d5955aa68f34e9d6658d74
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.