# VAE Image Processor

The `VaeImageProcessor` provides a unified API for `StableDiffusionPipeline`s to prepare image inputs for VAE encoding and to post-process the outputs once they're decoded. This includes transformations such as resizing, normalization, and conversion between PIL Image, PyTorch tensor, and NumPy array formats.

All pipelines with a `VaeImageProcessor` accept PIL Images, PyTorch tensors, or NumPy arrays as image inputs, and return outputs based on the `output_type` argument set by the user. You can pass encoded image latents directly to a pipeline, and return latents from a pipeline as a specific output with the `output_type` argument (for example `output_type="pt"`). This lets you take the generated latents from one pipeline and pass them to another pipeline as input without ever leaving the latent space, and it makes it much easier to chain pipelines by passing PyTorch tensors directly between them.
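Below is a minimal sketch of this latent handoff between two pipelines. The checkpoint name is illustrative, and the sketch assumes `output_type="latent"` skips VAE decoding so the raw latents are returned:

```python
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

# Checkpoint name is illustrative; any Stable Diffusion checkpoint should work.
text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Reuse the loaded components so both pipelines share the same VAE and UNet.
img2img = StableDiffusionImg2ImgPipeline(**text2img.components)

# output_type="latent" returns the latents without decoding them to images.
latents = text2img("a photo of an astronaut", output_type="latent").images

# The latents go straight into the next pipeline; no decode/re-encode round trip.
image = img2img("an astronaut riding a horse", image=latents).images[0]
```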
## VaeImageProcessor

### class diffusers.image_processor.VaeImageProcessor

( do_resize: bool = True, vae_scale_factor: int = 8, resample: str = 'lanczos', do_normalize: bool = True, do_binarize: bool = False, do_convert_rgb: bool = False, do_convert_grayscale: bool = False )

Parameters:

- **do_resize** (`bool`, *optional*, defaults to `True`) — Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept `height` and `width` arguments from the `VaeImageProcessor.preprocess()` method.
- **vae_scale_factor** (`int`, *optional*, defaults to `8`) — VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
- **resample** (`str`, *optional*, defaults to `'lanczos'`) — Resampling filter to use when resizing the image.
- **do_normalize** (`bool`, *optional*, defaults to `True`) — Whether to normalize the image to [-1, 1].
- **do_binarize** (`bool`, *optional*, defaults to `False`) — Whether to binarize the image to 0/1.
- **do_convert_rgb** (`bool`, *optional*, defaults to `False`) — Whether to convert the images to RGB format.
- **do_convert_grayscale** (`bool`, *optional*, defaults to `False`) — Whether to convert the images to grayscale format.

Image processor for VAE.
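The processor can also be used on its own, outside of a pipeline. A minimal sketch, assuming only the defaults above:

```python
import numpy as np
from PIL import Image
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(vae_scale_factor=8)

# A dummy 511x767 RGB image; its dimensions are not multiples of 8.
image = Image.fromarray(np.random.randint(0, 256, (511, 767, 3), dtype=np.uint8))

# preprocess resizes to multiples of vae_scale_factor, converts to a PyTorch
# tensor of shape [batch, channel, height, width], and normalizes to [-1, 1].
tensor = processor.preprocess(image)
print(tensor.shape)  # e.g. torch.Size([1, 3, 504, 760])
```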
#### binarize

( image: PIL.Image.Image )

Create a mask by binarizing the image to 0/1.

#### convert_to_grayscale

( image: PIL.Image.Image )

Converts a PIL image to grayscale format.

#### convert_to_rgb

( image: PIL.Image.Image )

Converts a PIL image to RGB format.

#### denormalize

( images )

Denormalize an image array to [0, 1].

#### get_default_height_width

( image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], height: Optional[int] = None, width: Optional[int] = None )

Returns the height and width of the image, downscaled to the next integer multiple of `vae_scale_factor`.

Parameters:

- **image** (`PIL.Image.Image`, `np.ndarray`, or `torch.Tensor`) — The image input; can be a PIL image, NumPy array, or PyTorch tensor. A NumPy array should have shape `[batch, height, width]` or `[batch, height, width, channel]`; a PyTorch tensor should have shape `[batch, channel, height, width]`.
- **height** (`int`, *optional*, defaults to `None`) — The height of the preprocessed image. If `None`, the height of the `image` input is used.
- **width** (`int`, *optional*, defaults to `None`) — The width of the preprocessed image. If `None`, the width of the `image` input is used.

#### normalize

( images )

Normalize an image array to [-1, 1].

#### numpy_to_pil

( images: np.ndarray )

Convert a NumPy image or a batch of images to a PIL image.

#### numpy_to_pt

( images: np.ndarray )

Convert a NumPy image to a PyTorch tensor.

#### pil_to_numpy

( images: Union[List[PIL.Image.Image], PIL.Image.Image] )

Convert a PIL image or a list of PIL images to NumPy arrays.

#### preprocess

( image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], height: Optional[int] = None, width: Optional[int] = None )

Preprocess the image input. Accepted formats are PIL images, NumPy arrays, or PyTorch tensors.

#### pt_to_numpy

( images: torch.FloatTensor )

Convert a PyTorch tensor to a NumPy image.

#### resize

( image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], height: Optional[int] = None, width: Optional[int] = None )

Resize the image.
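The `normalize` and `denormalize` methods above are inverse affine maps between the `[0, 1]` pixel range and the `[-1, 1]` range the VAE expects. A quick roundtrip sketch, assuming both are static methods as in the current source:

```python
import torch
from diffusers.image_processor import VaeImageProcessor

images = torch.rand(1, 3, 64, 64)  # pixel values in [0, 1]

# normalize maps [0, 1] -> [-1, 1]; denormalize maps back and clamps to [0, 1].
normalized = VaeImageProcessor.normalize(images)
restored = VaeImageProcessor.denormalize(normalized)
assert torch.allclose(images, restored, atol=1e-6)
```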
## VaeImageProcessorLDM3D

The `VaeImageProcessorLDM3D` accepts RGB and depth inputs and returns RGB and depth outputs.

### class diffusers.image_processor.VaeImageProcessorLDM3D

( do_resize: bool = True, vae_scale_factor: int = 8, resample: str = 'lanczos', do_normalize: bool = True )

Parameters:

- **do_resize** (`bool`, *optional*, defaults to `True`) — Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
- **vae_scale_factor** (`int`, *optional*, defaults to `8`) — VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
- **resample** (`str`, *optional*, defaults to `lanczos`) — Resampling filter to use when resizing the image.
- **do_normalize** (`bool`, *optional*, defaults to `True`) — Whether to normalize the image to [-1, 1].

Image processor for VAE LDM3D.

#### numpy_to_depth

( images )

Convert a NumPy depth image or a batch of images to a PIL image.

#### numpy_to_pil

( images )

Convert a NumPy image or a batch of images to a PIL image.

#### rgblike_to_depthmap

( image )

Convert an RGB-like depth image to a depth map.
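LDM3D packs a 16-bit depth value into two 8-bit channels of an RGB-like image, and `rgblike_to_depthmap` unpacks it. A small sketch, assuming the high byte lives in the second channel and the low byte in the third (as in the current diffusers source); treat the packing scheme as illustrative:

```python
import torch
from diffusers.image_processor import VaeImageProcessorLDM3D

# An RGB-like tensor of shape [height, width, 3] where the 16-bit depth is
# split across the last two channels.
rgb_like = torch.randint(0, 256, (64, 64, 3))

depth = VaeImageProcessorLDM3D.rgblike_to_depthmap(rgb_like)
print(depth.shape)  # torch.Size([64, 64]), values in [0, 2**16 - 1]
```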
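In practice the LDM3D pipeline wires this processor in automatically. An end-to-end sketch, where the checkpoint name is illustrative and the pipeline output is assumed to expose `rgb` and `depth` image lists:

```python
import torch
from diffusers import StableDiffusionLDM3DPipeline

pipe = StableDiffusionLDM3DPipeline.from_pretrained(
    "Intel/ldm3d-4c", torch_dtype=torch.float16
).to("cuda")

output = pipe("a photo of a cozy cabin in the woods")

# The LDM3D processor post-processes both modalities from the same latents.
rgb_image, depth_image = output.rgb[0], output.depth[0]
rgb_image.save("cabin_rgb.png")
depth_image.save("cabin_depth.png")
```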