Buckets:

rtrm's picture
download
raw
23.3 kB
// NOTE(review): Generated artifact — this is a compiled, minified Svelte page
// chunk (it imports from hashed "../chunks/*" files and exports `yr as
// component`, the usual SvelteKit client-build shape) rendering the Hugging
// Face `diffusers` "Quantization" API docs page. It should be regenerated by
// the docs build rather than edited by hand.
// NOTE(review): Some line breaks below fall inside double-quoted string
// literals (e.g. the repeated "docstring border-l-2 ..." class strings and
// the `val:": \nQuantizationConfigMixin"` parameter), which would be a syntax
// error in real JavaScript — they look like soft-wrap artifacts introduced
// when this long single-line file was copied/displayed; confirm against the
// raw file before relying on exact bytes. They are preserved verbatim here.
import{s as nr,o as ar,n as ir}from"../chunks/scheduler.8c3d61f6.js";import{S as or,i as dr,g as i,s,r as l,A as fr,h as o,f as r,c as n,j as _,u as m,x as b,k as $,y as t,a as u,v as c,d as p,t as g,w as h}from"../chunks/index.da70eac4.js";import{T as ur}from"../chunks/Tip.1d9b8c37.js";import{D as v}from"../chunks/Docstring.fa488882.js";import{H as Le,E as lr}from"../chunks/index.dfbaf638.js";
// Fragment factory for the Tip component's default slot: returns the
// compiled-Svelte fragment object (c = create, l = claim existing DOM for
// hydration via the "data-svelte-h" marker, m = mount, p = update — here
// `ir`, presumably a no-op since the content is static, d = destroy)
// rendering one <p> of static HTML.
function mr(He){let y,I='Learn how to quantize models in the <a href="../quantization/overview">Quantization</a> guide.';return{c(){y=i("p"),y.innerHTML=I},l(z){y=o(z,"P",{"data-svelte-h":!0}),b(y)!=="svelte-1dd515k"&&(y.innerHTML=I)},m(z,ge){u(z,y,ge)},p:ir,d(z){z&&r(y)}}}
// Main page fragment: instantiates heading (Le), docstring (v), Tip (ur) and
// edit-on-GitHub (lr) child components, holds the pre-rendered docstring HTML
// in string constants (Vt, jt, Gt ... er), and returns the fragment object
// (c/l/h/m/p/i/o/d) that creates, hydrates, mounts, updates, transitions and
// destroys the whole page. The long template literals below intentionally
// contain real newlines — they are docstring text, not wrapping artifacts.
function cr(He){let y,I,z,ge,V,Pe,j,Vt='Quantization techniques reduce memory and computational costs by representing weights and activations with lower-precision data types like 8-bit integers (int8). This enables loading larger models you normally wouldn’t be able to fit into memory, and speeding up inference. Diffusers supports 8-bit and 4-bit quantization with <a href="https://huggingface.co/docs/bitsandbytes/en/index" rel="nofollow">bitsandbytes</a>.',Ae,G,jt='Quantization techniques that aren’t supported in Transformers can be added with the <a href="/docs/diffusers/pr_11415/en/api/quantization#diffusers.DiffusersQuantizer">DiffusersQuantizer</a> class.',Ie,x,Ve,B,je,U,F,Ge,S,Be,O,Y,Ue,N,Fe,R,W,Se,J,Oe,K,X,Ye,Z,Ne,d,ee,ut,he,Gt=`Abstract class of the HuggingFace quantizer. Supports for now quantizing HF diffusers models for inference and/or
quantization. This class is used only for diffusers.models.modeling_utils.ModelMixin.from_pretrained and cannot be
easily used outside the scope of that method yet.`,lt,_e,Bt=`Attributes
quantization_config (<code>diffusers.quantizers.quantization_config.QuantizationConfigMixin</code>):
The quantization config that defines the quantization parameters of your model that you want to quantize.
modules_to_not_convert (<code>List[str]</code>, <em>optional</em>):
The list of module names to not convert when quantizing the model.
required_packages (<code>List[str]</code>, <em>optional</em>):
The list of required pip packages to install prior to using the quantizer
requires_calibration (<code>bool</code>):
Whether the quantization method requires to calibrate the model before using it.`,mt,w,te,ct,$e,Ut="adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization",pt,q,re,gt,ve,Ft=`Override this method if you want to adjust the <code>target_dtype</code> variable used in <code>from_pretrained</code> to compute the
device_map in case the device_map is a <code>str</code>. E.g. for bitsandbytes we force-set <code>target_dtype</code> to <code>torch.int8</code>
and for 4-bit we pass a custom enum <code>accelerate.CustomDtype.int4</code>.`,ht,D,se,_t,be,St=`checks if a loaded state_dict component is part of quantized param + some validation; only defined for
quantization methods that require to create a new parameters for quantization.`,$t,C,ne,vt,ye,Ot="checks if the quantized param has expected shape.",bt,T,ae,yt,ze,Yt="takes needed components from state_dict and creates quantized param.",zt,Q,ie,xt,xe,Nt=`Potentially dequantize the model to retrive the original model, with some loss in accuracy / performance. Note
not all quantization schemes support this.`,wt,k,oe,qt,we,Rt=`returns dtypes for modules that are not quantized - used for the computation of the device_map in case one
passes a str as a device_map. The method will use the <code>modules_to_not_convert</code> that is modified in
<code>_process_model_before_weight_loading</code>. <code>diffusers</code> models don’t have any <code>modules_to_not_convert</code> attributes
yet but this can change soon in the future.`,Dt,L,de,Ct,qe,Wt=`Post-process the model post weights loading. Make sure to override the abstract method
<code>_process_model_after_weight_loading</code>.`,Tt,M,fe,Qt,De,Jt=`Setting model attributes and/or converting model before weights loading. At this point the model should be
initialized on the meta device so you can freely manipulate the skeleton of the model in order to replace
modules in-place. Make sure to override the abstract method <code>_process_model_before_weight_loading</code>.`,kt,E,ue,Lt,Ce,Kt=`Override this method if you want to pass a override the existing device map with a new one. E.g. for
bitsandbytes, since <code>accelerate</code> is a hard requirement, if no device_map is passed, the device_map is set to
\`“auto”“`,Mt,H,le,Et,Te,Xt="Override this method if you want to adjust the <code>missing_keys</code>.",Ht,P,me,Pt,Qe,Zt=`Some quantization methods require to explicitly set the dtype of the model to a target dtype. You need to
override this method in case you want to make sure that behavior is preserved`,At,A,ce,It,ke,er=`This method is used to potentially check for potential conflicts with arguments that are passed in
<code>from_pretrained</code>. You need to define it for all future quantizers that are integrated with diffusers. If no
explicit check are needed, simply return nothing.`,Re,pe,We,Me,Je;return V=new Le({props:{title:"Quantization",local:"quantization",headingTag:"h1"}}),x=new ur({props:{$$slots:{default:[mr]},$$scope:{ctx:He}}}),B=new Le({props:{title:"BitsAndBytesConfig",local:"diffusers.BitsAndBytesConfig",headingTag:"h2"}}),F=new v({props:{name:"class diffusers.BitsAndBytesConfig",anchor:"diffusers.BitsAndBytesConfig",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/utils/dummy_bitsandbytes_objects.py#L5"}}),S=new Le({props:{title:"GGUFQuantizationConfig",local:"diffusers.GGUFQuantizationConfig",headingTag:"h2"}}),Y=new v({props:{name:"class diffusers.GGUFQuantizationConfig",anchor:"diffusers.GGUFQuantizationConfig",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/utils/dummy_gguf_objects.py#L5"}}),N=new Le({props:{title:"QuantoConfig",local:"diffusers.QuantoConfig",headingTag:"h2"}}),W=new v({props:{name:"class diffusers.QuantoConfig",anchor:"diffusers.QuantoConfig",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/utils/dummy_optimum_quanto_objects.py#L5"}}),J=new Le({props:{title:"TorchAoConfig",local:"diffusers.TorchAoConfig",headingTag:"h2"}}),X=new v({props:{name:"class diffusers.TorchAoConfig",anchor:"diffusers.TorchAoConfig",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/utils/dummy_torchao_objects.py#L5"}}),Z=new Le({props:{title:"DiffusersQuantizer",local:"diffusers.DiffusersQuantizer",headingTag:"h2"}}),ee=new v({props:{name:"class diffusers.DiffusersQuantizer",anchor:"diffusers.DiffusersQuantizer",parameters:[{name:"quantization_config",val:": 
QuantizationConfigMixin"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L34"}}),te=new v({props:{name:"adjust_max_memory",anchor:"diffusers.DiffusersQuantizer.adjust_max_memory",parameters:[{name:"max_memory",val:": typing.Dict[str, typing.Union[int, str]]"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L133"}}),re=new v({props:{name:"adjust_target_dtype",anchor:"diffusers.DiffusersQuantizer.adjust_target_dtype",parameters:[{name:"torch_dtype",val:": torch.dtype"}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.adjust_target_dtype.torch_dtype",description:`<strong>torch_dtype</strong> (<code>torch.dtype</code>, <em>optional</em>) &#x2014;
The torch_dtype that is used to compute the device_map.`,name:"torch_dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L91"}}),se=new v({props:{name:"check_if_quantized_param",anchor:"diffusers.DiffusersQuantizer.check_if_quantized_param",parameters:[{name:"model",val:": ModelMixin"},{name:"param_value",val:": torch.Tensor"},{name:"param_name",val:": str"},{name:"state_dict",val:": typing.Dict[str, typing.Any]"},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L137"}}),ne=new v({props:{name:"check_quantized_param_shape",anchor:"diffusers.DiffusersQuantizer.check_quantized_param_shape",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L157"}}),ae=new v({props:{name:"create_quantized_param",anchor:"diffusers.DiffusersQuantizer.create_quantized_param",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L151"}}),ie=new v({props:{name:"dequantize",anchor:"diffusers.DiffusersQuantizer.dequantize",parameters:[{name:"model",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L200"}}),oe=new v({props:{name:"get_special_dtypes_update",anchor:"diffusers.DiffusersQuantizer.get_special_dtypes_update",parameters:[{name:"model",val:""},{name:"torch_dtype",val:": torch.dtype"}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.get_special_dtypes_update.model",description:`<strong>model</strong> (<code>~diffusers.models.modeling_utils.ModelMixin</code>) &#x2014;
The model to quantize`,name:"model"},{anchor:"diffusers.DiffusersQuantizer.get_special_dtypes_update.torch_dtype",description:`<strong>torch_dtype</strong> (<code>torch.dtype</code>) &#x2014;
The dtype passed in <code>from_pretrained</code> method.`,name:"torch_dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L113"}}),de=new v({props:{name:"postprocess_model",anchor:"diffusers.DiffusersQuantizer.postprocess_model",parameters:[{name:"model",val:": ModelMixin"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.postprocess_model.model",description:`<strong>model</strong> (<code>~diffusers.models.modeling_utils.ModelMixin</code>) &#x2014;
The model to quantize`,name:"model"},{anchor:"diffusers.DiffusersQuantizer.postprocess_model.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
The keyword arguments that are passed along <code>_process_model_after_weight_loading</code>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L187"}}),fe=new v({props:{name:"preprocess_model",anchor:"diffusers.DiffusersQuantizer.preprocess_model",parameters:[{name:"model",val:": ModelMixin"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.preprocess_model.model",description:`<strong>model</strong> (<code>~diffusers.models.modeling_utils.ModelMixin</code>) &#x2014;
The model to quantize`,name:"model"},{anchor:"diffusers.DiffusersQuantizer.preprocess_model.kwargs",description:`<strong>kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
The keyword arguments that are passed along <code>_process_model_before_weight_loading</code>.`,name:"kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L171"}}),ue=new v({props:{name:"update_device_map",anchor:"diffusers.DiffusersQuantizer.update_device_map",parameters:[{name:"device_map",val:": typing.Optional[typing.Dict[str, typing.Any]]"}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.update_device_map.device_map",description:`<strong>device_map</strong> (<code>Union[dict, str]</code>, <em>optional</em>) &#x2014;
The device_map that is passed through the <code>from_pretrained</code> method.`,name:"device_map"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L79"}}),le=new v({props:{name:"update_missing_keys",anchor:"diffusers.DiffusersQuantizer.update_missing_keys",parameters:[{name:"model",val:""},{name:"missing_keys",val:": typing.List[str]"},{name:"prefix",val:": str"}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.update_missing_keys.missing_keys",description:`<strong>missing_keys</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014;
The list of missing keys in the checkpoint compared to the state dict of the model`,name:"missing_keys"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L103"}}),me=new v({props:{name:"update_torch_dtype",anchor:"diffusers.DiffusersQuantizer.update_torch_dtype",parameters:[{name:"torch_dtype",val:": torch.dtype"}],parametersDescription:[{anchor:"diffusers.DiffusersQuantizer.update_torch_dtype.torch_dtype",description:`<strong>torch_dtype</strong> (<code>torch.dtype</code>) &#x2014;
The input dtype that is passed in <code>from_pretrained</code>`,name:"torch_dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L68"}}),ce=new v({props:{name:"validate_environment",anchor:"diffusers.DiffusersQuantizer.validate_environment",parameters:[{name:"*args",val:""},{name:"**kwargs",val:""}],source:"https://github.com/huggingface/diffusers/blob/vr_11415/src/diffusers/quantizers/base.py#L163"}}),pe=new lr({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/quantization.md"}}),{c(){y=i("meta"),I=s(),z=i("p"),ge=s(),l(V.$$.fragment),Pe=s(),j=i("p"),j.innerHTML=Vt,Ae=s(),G=i("p"),G.innerHTML=jt,Ie=s(),l(x.$$.fragment),Ve=s(),l(B.$$.fragment),je=s(),U=i("div"),l(F.$$.fragment),Ge=s(),l(S.$$.fragment),Be=s(),O=i("div"),l(Y.$$.fragment),Ue=s(),l(N.$$.fragment),Fe=s(),R=i("div"),l(W.$$.fragment),Se=s(),l(J.$$.fragment),Oe=s(),K=i("div"),l(X.$$.fragment),Ye=s(),l(Z.$$.fragment),Ne=s(),d=i("div"),l(ee.$$.fragment),ut=s(),he=i("p"),he.textContent=Gt,lt=s(),_e=i("p"),_e.innerHTML=Bt,mt=s(),w=i("div"),l(te.$$.fragment),ct=s(),$e=i("p"),$e.textContent=Ut,pt=s(),q=i("div"),l(re.$$.fragment),gt=s(),ve=i("p"),ve.innerHTML=Ft,ht=s(),D=i("div"),l(se.$$.fragment),_t=s(),be=i("p"),be.textContent=St,$t=s(),C=i("div"),l(ne.$$.fragment),vt=s(),ye=i("p"),ye.textContent=Ot,bt=s(),T=i("div"),l(ae.$$.fragment),yt=s(),ze=i("p"),ze.textContent=Yt,zt=s(),Q=i("div"),l(ie.$$.fragment),xt=s(),xe=i("p"),xe.textContent=Nt,wt=s(),k=i("div"),l(oe.$$.fragment),qt=s(),we=i("p"),we.innerHTML=Rt,Dt=s(),L=i("div"),l(de.$$.fragment),Ct=s(),qe=i("p"),qe.innerHTML=Wt,Tt=s(),M=i("div"),l(fe.$$.fragment),Qt=s(),De=i("p"),De.innerHTML=Jt,kt=s(),E=i("div"),l(ue.$$.fragment),Lt=s(),Ce=i("p"),Ce.innerHTML=Kt,Mt=s(),H=i("div"),l(le.$$.fragment),Et=s(),Te=i("p"),Te.innerHTML=Xt,Ht=s(),P=i("div"),l(me.$$.fragment),Pt=s(),Qe=i("p"),Qe.textContent=Zt,At=s(),A=i("div"),l(ce.$$.fragment),It=s(),ke=i("p"),ke.innerHTML=er,Re=s(),l(pe
.$$.fragment),We=s(),Me=i("p"),this.h()},l(e){const a=fr("svelte-u9bgzb",document.head);y=o(a,"META",{name:!0,content:!0}),a.forEach(r),I=n(e),z=o(e,"P",{}),_(z).forEach(r),ge=n(e),m(V.$$.fragment,e),Pe=n(e),j=o(e,"P",{"data-svelte-h":!0}),b(j)!=="svelte-gx66go"&&(j.innerHTML=Vt),Ae=n(e),G=o(e,"P",{"data-svelte-h":!0}),b(G)!=="svelte-1djxqph"&&(G.innerHTML=jt),Ie=n(e),m(x.$$.fragment,e),Ve=n(e),m(B.$$.fragment,e),je=n(e),U=o(e,"DIV",{class:!0});var Ee=_(U);m(F.$$.fragment,Ee),Ee.forEach(r),Ge=n(e),m(S.$$.fragment,e),Be=n(e),O=o(e,"DIV",{class:!0});var tr=_(O);m(Y.$$.fragment,tr),tr.forEach(r),Ue=n(e),m(N.$$.fragment,e),Fe=n(e),R=o(e,"DIV",{class:!0});var rr=_(R);m(W.$$.fragment,rr),rr.forEach(r),Se=n(e),m(J.$$.fragment,e),Oe=n(e),K=o(e,"DIV",{class:!0});var sr=_(K);m(X.$$.fragment,sr),sr.forEach(r),Ye=n(e),m(Z.$$.fragment,e),Ne=n(e),d=o(e,"DIV",{class:!0});var f=_(d);m(ee.$$.fragment,f),ut=n(f),he=o(f,"P",{"data-svelte-h":!0}),b(he)!=="svelte-n1gm3c"&&(he.textContent=Gt),lt=n(f),_e=o(f,"P",{"data-svelte-h":!0}),b(_e)!=="svelte-1fnduym"&&(_e.innerHTML=Bt),mt=n(f),w=o(f,"DIV",{class:!0});var Ke=_(w);m(te.$$.fragment,Ke),ct=n(Ke),$e=o(Ke,"P",{"data-svelte-h":!0}),b($e)!=="svelte-1d4i4z7"&&($e.textContent=Ut),Ke.forEach(r),pt=n(f),q=o(f,"DIV",{class:!0});var Xe=_(q);m(re.$$.fragment,Xe),gt=n(Xe),ve=o(Xe,"P",{"data-svelte-h":!0}),b(ve)!=="svelte-eyxa3y"&&(ve.innerHTML=Ft),Xe.forEach(r),ht=n(f),D=o(f,"DIV",{class:!0});var Ze=_(D);m(se.$$.fragment,Ze),_t=n(Ze),be=o(Ze,"P",{"data-svelte-h":!0}),b(be)!=="svelte-y0lxmf"&&(be.textContent=St),Ze.forEach(r),$t=n(f),C=o(f,"DIV",{class:!0});var et=_(C);m(ne.$$.fragment,et),vt=n(et),ye=o(et,"P",{"data-svelte-h":!0}),b(ye)!=="svelte-1170ss6"&&(ye.textContent=Ot),et.forEach(r),bt=n(f),T=o(f,"DIV",{class:!0});var tt=_(T);m(ae.$$.fragment,tt),yt=n(tt),ze=o(tt,"P",{"data-svelte-h":!0}),b(ze)!=="svelte-odkanz"&&(ze.textContent=Yt),tt.forEach(r),zt=n(f),Q=o(f,"DIV",{class:!0});var 
rt=_(Q);m(ie.$$.fragment,rt),xt=n(rt),xe=o(rt,"P",{"data-svelte-h":!0}),b(xe)!=="svelte-1wzx7fq"&&(xe.textContent=Nt),rt.forEach(r),wt=n(f),k=o(f,"DIV",{class:!0});var st=_(k);m(oe.$$.fragment,st),qt=n(st),we=o(st,"P",{"data-svelte-h":!0}),b(we)!=="svelte-1cb2fzf"&&(we.innerHTML=Rt),st.forEach(r),Dt=n(f),L=o(f,"DIV",{class:!0});var nt=_(L);m(de.$$.fragment,nt),Ct=n(nt),qe=o(nt,"P",{"data-svelte-h":!0}),b(qe)!=="svelte-6rgq61"&&(qe.innerHTML=Wt),nt.forEach(r),Tt=n(f),M=o(f,"DIV",{class:!0});var at=_(M);m(fe.$$.fragment,at),Qt=n(at),De=o(at,"P",{"data-svelte-h":!0}),b(De)!=="svelte-wiwxpv"&&(De.innerHTML=Jt),at.forEach(r),kt=n(f),E=o(f,"DIV",{class:!0});var it=_(E);m(ue.$$.fragment,it),Lt=n(it),Ce=o(it,"P",{"data-svelte-h":!0}),b(Ce)!=="svelte-5g503h"&&(Ce.innerHTML=Kt),it.forEach(r),Mt=n(f),H=o(f,"DIV",{class:!0});var ot=_(H);m(le.$$.fragment,ot),Et=n(ot),Te=o(ot,"P",{"data-svelte-h":!0}),b(Te)!=="svelte-13hqoy0"&&(Te.innerHTML=Xt),ot.forEach(r),Ht=n(f),P=o(f,"DIV",{class:!0});var dt=_(P);m(me.$$.fragment,dt),Pt=n(dt),Qe=o(dt,"P",{"data-svelte-h":!0}),b(Qe)!=="svelte-udr2k7"&&(Qe.textContent=Zt),dt.forEach(r),At=n(f),A=o(f,"DIV",{class:!0});var ft=_(A);m(ce.$$.fragment,ft),It=n(ft),ke=o(ft,"P",{"data-svelte-h":!0}),b(ke)!=="svelte-1ra2q1u"&&(ke.innerHTML=er),ft.forEach(r),f.forEach(r),Re=n(e),m(pe.$$.fragment,e),We=n(e),Me=o(e,"P",{}),_(Me).forEach(r),this.h()},h(){$(y,"name","hf:doc:metadata"),$(y,"content",pr),$(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(K,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 
border-gray-100 rounded-tl-xl mb-6 mt-8"),$(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(d,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,a){t(document.head,y),u(e,I,a),u(e,z,a),u(e,ge,a),c(V,e,a),u(e,Pe,a),u(e,j,a),u(e,Ae,a),u(e,G,a),u(e,Ie,a),c(x,e,a),u(e,Ve,a),c(B,e,a),u(e,je,a),u(e,U,a),c(F,U,null),u(e,Ge,a),c(S,e,a),u(e,Be,a),u(e,O,a),c(Y,O,null),u(e,Ue,a),c(N,e,a),u(e,Fe,a),u(e,R,a),c(W,R,null),u(e,Se,a),c(J,e,a),u(e,Oe,a),u(e,K,a),c(X,K,null),u(e,Ye,a),c(Z,e,a),u(e,Ne,a),u(e,d,a),c(ee,d,null),t(d,ut),t(d,he),t(d,lt),t(d,_e),t(d,mt),t(d,w),c(te,w,null),t(w,ct),t(w,$e),t(d,pt),t(d,q),c(re,q,null),t(q,gt),t(q,ve),t(d,ht),t(d,D),c(se,D,null),t(D,_t),t(D,be),t(d,$t),t(d,C),c(ne,C,null),t(C,vt),t(C,ye),t(d,bt),t(d,T),c(ae,T,null),t(T,yt),t(T,ze),t(d,zt),t(d,Q),c(ie,Q,null),t(Q,xt),t(Q,xe),t(d,wt),t(d,k),c(oe,k,null),t(k,qt),t(k,we),t(d,Dt),t(d,L),c(de,L,null),t(L,Ct),t(L,qe),t(d,Tt),t(d,M),c(fe,M,null),t(M,Qt),t(M,De),t(d,kt),t(d,E),c(ue,E,null),t(E,Lt),t(E,Ce),t(d,Mt),t(d,H),c(le,H,null),t(H,Et),t(H,Te),t(d,Ht),t(d,P),c(me,P,null),t(P,Pt),t(P,Qe),t(d,At),t(d,A),c(ce,A,null),t(A,It),t(A,ke),u(e,Re,a),c(pe,e,a),u(e,We,a),u(e,Me,a),Je=!0},p(e,[a]){const 
Ee={};a&2&&(Ee.$$scope={dirty:a,ctx:e}),x.$set(Ee)},i(e){Je||(p(V.$$.fragment,e),p(x.$$.fragment,e),p(B.$$.fragment,e),p(F.$$.fragment,e),p(S.$$.fragment,e),p(Y.$$.fragment,e),p(N.$$.fragment,e),p(W.$$.fragment,e),p(J.$$.fragment,e),p(X.$$.fragment,e),p(Z.$$.fragment,e),p(ee.$$.fragment,e),p(te.$$.fragment,e),p(re.$$.fragment,e),p(se.$$.fragment,e),p(ne.$$.fragment,e),p(ae.$$.fragment,e),p(ie.$$.fragment,e),p(oe.$$.fragment,e),p(de.$$.fragment,e),p(fe.$$.fragment,e),p(ue.$$.fragment,e),p(le.$$.fragment,e),p(me.$$.fragment,e),p(ce.$$.fragment,e),p(pe.$$.fragment,e),Je=!0)},o(e){g(V.$$.fragment,e),g(x.$$.fragment,e),g(B.$$.fragment,e),g(F.$$.fragment,e),g(S.$$.fragment,e),g(Y.$$.fragment,e),g(N.$$.fragment,e),g(W.$$.fragment,e),g(J.$$.fragment,e),g(X.$$.fragment,e),g(Z.$$.fragment,e),g(ee.$$.fragment,e),g(te.$$.fragment,e),g(re.$$.fragment,e),g(se.$$.fragment,e),g(ne.$$.fragment,e),g(ae.$$.fragment,e),g(ie.$$.fragment,e),g(oe.$$.fragment,e),g(de.$$.fragment,e),g(fe.$$.fragment,e),g(ue.$$.fragment,e),g(le.$$.fragment,e),g(me.$$.fragment,e),g(ce.$$.fragment,e),g(pe.$$.fragment,e),Je=!1},d(e){e&&(r(I),r(z),r(ge),r(Pe),r(j),r(Ae),r(G),r(Ie),r(Ve),r(je),r(U),r(Ge),r(Be),r(O),r(Ue),r(Fe),r(R),r(Se),r(Oe),r(K),r(Ye),r(Ne),r(d),r(Re),r(We),r(Me)),r(y),h(V,e),h(x,e),h(B,e),h(F),h(S,e),h(Y),h(N,e),h(W),h(J,e),h(X),h(Z,e),h(ee),h(te),h(re),h(se),h(ne),h(ae),h(ie),h(oe),h(de),h(fe),h(ue),h(le),h(me),h(ce),h(pe,e)}}}
// Serialized page metadata (title + section TOC) injected into the
// hf:doc:metadata <meta> tag by the h() hook above.
const pr='{"title":"Quantization","local":"quantization","sections":[{"title":"BitsAndBytesConfig","local":"diffusers.BitsAndBytesConfig","sections":[],"depth":2},{"title":"GGUFQuantizationConfig","local":"diffusers.GGUFQuantizationConfig","sections":[],"depth":2},{"title":"QuantoConfig","local":"diffusers.QuantoConfig","sections":[],"depth":2},{"title":"TorchAoConfig","local":"diffusers.TorchAoConfig","sections":[],"depth":2},{"title":"DiffusersQuantizer","local":"diffusers.DiffusersQuantizer","sections":[],"depth":2}],"depth":1}';
// Instance-init function: schedules a callback via `ar` (presumably onMount)
// that reads the "fw" query parameter; the value is read but unused here.
// Returns an empty context array (the page has no reactive state).
function gr(He){return ar(()=>{new 
URLSearchParams(window.location.search).get("fw")}),[]}
// Component class wiring the instance init (gr) and fragment factory (cr)
// into the Svelte component base `or`; exported as the route's `component`.
class yr extends or{constructor(y){super(),dr(this,y,gr,cr,nr,{})}}export{yr as component};

Xet Storage Details

Size:
23.3 kB
·
Xet hash:
d46ba66068e337e42ededd403e252d223dafa96e64f9ccf667cd7695aef9e280

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.