Buckets:
// NOTE(review): This is machine-generated, minified output — the compiled Svelte
// client component for the diffusers "SD3Transformer2D" loader-mixin docs page
// (imports reference hashed build chunks: scheduler, index, CopyLLMTxtMenu,
// Docstring, MermaidChart). Do not hand-edit; regenerate from the .svelte source.
// NOTE(review): The stray "| " prefixes and " | |" suffixes on several lines —
// including some inside template-literal docstring text — look like table-scrape
// artifacts, not part of the original artifact. Confirm against the original
// build output before treating this copy as runnable; they are preserved
// byte-for-byte here because runtime strings must not be altered in review.
// Structure (for orientation only — presumed standard Svelte compiler output,
// verify against the Svelte client-component internals if it matters):
//   pe(W) — create_fragment: c() builds DOM, l(e) claims SSR DOM, h() sets
//           attributes/classes, m(e,t) mounts, i/o transition, d(e) destroys.
//   ce    — serialized page metadata (title/sections outline) set as the
//           "hf:doc:metadata" <meta> content.
//   ue(W) — instance init: onMount-style read of the "fw" query param.
//   xe    — SvelteComponent subclass exported as `component`.
| import{s as ae,n as oe,o as ne}from"../chunks/scheduler.53228c21.js";import{S as ie,i as de,e as i,s as a,c as b,h as le,a as d,d as r,b as o,f as K,g as v,j as E,k as A,l as x,m as s,n as M,t as y,o as S,p as P}from"../chunks/index.100fac89.js";import{C as me}from"../chunks/CopyLLMTxtMenu.88008e00.js";import{D as re}from"../chunks/Docstring.98d3e518.js";import{H as se,E as fe}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.afa087fa.js";function pe(W){let l,I,C,k,p,z,c,R,u,X='This class is useful when <em>only</em> loading weights into a <a href="/docs/diffusers/pr_12448/en/api/models/sd3_transformer2d#diffusers.SD3Transformer2DModel">SD3Transformer2DModel</a>. If you need to load weights into the text encoder or a text encoder and SD3Transformer2DModel, check <a href="lora#diffusers.loaders.SD3LoraLoaderMixin"><code>SD3LoraLoaderMixin</code></a> class instead.',j,g,Y="The <code>SD3Transformer2DLoadersMixin</code> class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.",O,m,Z='<p>To learn more about how to load LoRA weights, see the <a href="../../tutorials/using_peft_for_inference">LoRA</a> loading guide.</p>',q,_,U,n,h,Q,T,ee="Load IP-Adapters and LoRA layers into a <code>[SD3Transformer2DModel]</code>.",J,f,$,N,w,te="Sets IP-Adapter attention processors, image projection, and loads state_dict.",V,D,B,H,F;return p=new me({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),c=new se({props:{title:"SD3Transformer2D",local:"sd3transformer2d",headingTag:"h1"}}),_=new se({props:{title:"SD3Transformer2DLoadersMixin",local:"diffusers.loaders.SD3Transformer2DLoadersMixin",headingTag:"h2"}}),h=new re({props:{name:"class 
diffusers.loaders.SD3Transformer2DLoadersMixin",anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/loaders/transformer_sd3.py#L28"}}),$=new re({props:{name:"_load_ip_adapter_weights",anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights",parameters:[{name:"state_dict",val:": typing.Dict"},{name:"low_cpu_mem_usage",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights.state_dict",description:`<strong>state_dict</strong> (<code>Dict</code>) — | |
| State dict with keys “ip_adapter”, which contains parameters for attention processors, and | |
| “image_proj”, which contains parameters for image projection net.`,name:"state_dict"},{anchor:"diffusers.loaders.SD3Transformer2DLoadersMixin._load_ip_adapter_weights.low_cpu_mem_usage",description:`<strong>low_cpu_mem_usage</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code> if torch version >= 1.9.0 else <code>False</code>) — | |
| Speed up model loading only loading the pretrained weights and not initializing the weights. This also | |
| tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. | |
| Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this | |
| argument to <code>True</code> will raise an error.`,name:"low_cpu_mem_usage"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/loaders/transformer_sd3.py#L158"}}),D=new fe({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/loaders/transformer_sd3.md"}}),{c(){l=i("meta"),I=a(),C=i("p"),k=a(),b(p.$$.fragment),z=a(),b(c.$$.fragment),R=a(),u=i("p"),u.innerHTML=X,j=a(),g=i("p"),g.innerHTML=Y,O=a(),m=i("blockquote"),m.innerHTML=Z,q=a(),b(_.$$.fragment),U=a(),n=i("div"),b(h.$$.fragment),Q=a(),T=i("p"),T.innerHTML=ee,J=a(),f=i("div"),b($.$$.fragment),N=a(),w=i("p"),w.textContent=te,V=a(),b(D.$$.fragment),B=a(),H=i("p"),this.h()},l(e){const t=le("svelte-u9bgzb",document.head);l=d(t,"META",{name:!0,content:!0}),t.forEach(r),I=o(e),C=d(e,"P",{}),K(C).forEach(r),k=o(e),v(p.$$.fragment,e),z=o(e),v(c.$$.fragment,e),R=o(e),u=d(e,"P",{"data-svelte-h":!0}),E(u)!=="svelte-d6g05g"&&(u.innerHTML=X),j=o(e),g=d(e,"P",{"data-svelte-h":!0}),E(g)!=="svelte-b199o"&&(g.innerHTML=Y),O=o(e),m=d(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),E(m)!=="svelte-140bgsv"&&(m.innerHTML=Z),q=o(e),v(_.$$.fragment,e),U=o(e),n=d(e,"DIV",{class:!0});var L=K(n);v(h.$$.fragment,L),Q=o(L),T=d(L,"P",{"data-svelte-h":!0}),E(T)!=="svelte-v7p57z"&&(T.innerHTML=ee),J=o(L),f=d(L,"DIV",{class:!0});var G=K(f);v($.$$.fragment,G),N=o(G),w=d(G,"P",{"data-svelte-h":!0}),E(w)!=="svelte-ym8e73"&&(w.textContent=te),G.forEach(r),L.forEach(r),V=o(e),v(D.$$.fragment,e),B=o(e),H=d(e,"P",{}),K(H).forEach(r),this.h()},h(){A(l,"name","hf:doc:metadata"),A(l,"content",ce),A(m,"class","tip"),A(f,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),A(n,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,t){x(document.head,l),s(e,I,t),s(e,C,t),s(e,k,t),M(p,e,t),s(e,z,t),M(c,e,t),s(e,R,t),s(e,u,t),s(e,j,t),s(e,g,t),s(e,O,t),s(e,m,t),s(e,q,t),M(_,e,t),s(e,U,t),s(e,n,t),M(h,n,null),x(n,Q),x(n,T),x(n,J),x(n,f),M($,f,null),x(f,N),x(f,w),s(e,V,t),M(D,e,t),s(e,B,t),s(e,H,t),F=!0},p:oe,i(e){F||(y(p.$$.fragment,e),y(c.$$.fragment,e),y(_.$$.fragment,e),y(h.$$.fragment,e),y($.$$.fragment,e),y(D.$$.fragment,e),F=!0)},o(e){S(p.$$.fragment,e),S(c.$$.fragment,e),S(_.$$.fragment,e),S(h.$$.fragment,e),S($.$$.fragment,e),S(D.$$.fragment,e),F=!1},d(e){e&&(r(I),r(C),r(k),r(z),r(R),r(u),r(j),r(g),r(O),r(m),r(q),r(U),r(n),r(V),r(B),r(H)),r(l),P(p,e),P(c,e),P(_,e),P(h),P($),P(D,e)}}}const ce='{"title":"SD3Transformer2D","local":"sd3transformer2d","sections":[{"title":"SD3Transformer2DLoadersMixin","local":"diffusers.loaders.SD3Transformer2DLoadersMixin","sections":[],"depth":2}],"depth":1}';function ue(W){return ne(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class xe extends ie{constructor(l){super(),de(this,l,ue,pe,ae,{})}}export{xe as component}; | |
Xet Storage Details
- Size:
- 6.08 kB
- Xet hash:
- d93ce14705c30973661c87aced9f7771701ca509e8d3cd8060c563cd1ea48504
Xet stores files efficiently by splitting them into unique, deduplicated chunks, which accelerates both uploads and downloads. More info.