Buckets:

rtrm's picture
download
raw
6.4 kB
// Auto-generated, minified Svelte client component for the diffusers
// "Video Processor" docs page. Do not hand-edit logic here: regenerate from
// docs/source/en/api/video_processor.md. Single-letter names come from the
// Svelte compiler (c/l/h/m/p/i/o/d = create/claim/hydrate/mount/update/
// intro/outro/destroy of the fragment).
import{s as W,n as X,o as Y}from"../chunks/scheduler.8c3d61f6.js";import{S as Z,i as ee,g as c,s as r,r as y,A as oe,h as p,f as t,c as i,j as S,u as w,x as R,k,y as b,a as s,v as x,d as V,t as L,w as T}from"../chunks/index.da70eac4.js";import{D as K}from"../chunks/Docstring.6b390b9a.js";import{H as Q,E as te}from"../chunks/EditOnGithub.1e64e623.js";
// Fragment factory: builds the page (headings, intro paragraph, two Docstring
// cards for preprocess_video/postprocess_video, EditOnGithub footer).
// FIX: the `width` parameter description previously contained mangled HTML
// (mismatched <code> tags and a literal "--"); it now mirrors the well-formed
// `height` entry, using &#x2014; and balanced <code>...</code> spans.
function se(U){let n,I,$,D,h,N,l,B="The <code>VideoProcessor</code> provides a unified API for video pipelines to prepare inputs for VAE encoding and post-processing outputs once they’re decoded. The class inherits <code>VaeImageProcessor</code> so it includes transformations such as resizing, normalization, and conversion between PIL Image, PyTorch, and NumPy arrays.",C,f,E,d,m,j,v,F="Preprocesses input video(s).",z,a,u,G,_,J="Converts a video tensor to a list of frames for export.",A,g,H,P,M;return h=new Q({props:{title:"Video Processor",local:"video-processor",headingTag:"h1"}}),f=new Q({props:{title:"VideoProcessor",local:"diffusers.video_processor.VideoProcessor.preprocess_video",headingTag:"h2"}}),m=new K({props:{name:"diffusers.video_processor.VideoProcessor.preprocess_video",anchor:"diffusers.video_processor.VideoProcessor.preprocess_video",parameters:[{name:"video",val:""},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"diffusers.video_processor.VideoProcessor.preprocess_video.video",description:`<strong>video</strong> (<code>List[PIL.Image]</code>, <code>List[List[PIL.Image]]</code>, <code>torch.Tensor</code>, <code>np.array</code>, <code>List[torch.Tensor]</code>, <code>List[np.array]</code>) &#x2014;
The input video. It can be one of the following:<ul>
<li>List of the PIL images.</li>
<li>List of list of PIL images.</li>
<li>4D Torch tensors (expected shape for each tensor <code>(num_frames, num_channels, height, width)</code>).</li>
<li>4D NumPy arrays (expected shape for each array <code>(num_frames, height, width, num_channels)</code>).</li>
<li>List of 4D Torch tensors (expected shape for each tensor <code>(num_frames, num_channels, height, width)</code>).</li>
<li>List of 4D NumPy arrays (expected shape for each array <code>(num_frames, height, width, num_channels)</code>).</li>
<li>5D NumPy arrays: expected shape for each array <code>(batch_size, num_frames, height, width, num_channels)</code>.</li>
<li>5D Torch tensors: expected shape for each array <code>(batch_size, num_frames, num_channels, height, width)</code>.</li>
</ul>`,name:"video"},{anchor:"diffusers.video_processor.VideoProcessor.preprocess_video.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
The height in preprocessed frames of the video. If <code>None</code>, will use the <code>get_default_height_width()</code> to
get default height.`,name:"height"},{anchor:"diffusers.video_processor.VideoProcessor.preprocess_video.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) &#x2014;
The width in preprocessed frames of the video. If <code>None</code>, will use the <code>get_default_height_width()</code> to
get the default width.`,name:"width"}],source:"https://github.com/huggingface/diffusers/blob/vr_10083/src/diffusers/video_processor.py#L28"}}),u=new K({props:{name:"diffusers.video_processor.VideoProcessor.postprocess_video",anchor:"diffusers.video_processor.VideoProcessor.postprocess_video",parameters:[{name:"video",val:": Tensor"},{name:"output_type",val:": str = 'np'"}],parametersDescription:[{anchor:"diffusers.video_processor.VideoProcessor.postprocess_video.video",description:"<strong>video</strong> (<code>torch.Tensor</code>) &#x2014; The video as a tensor.",name:"video"},{anchor:"diffusers.video_processor.VideoProcessor.postprocess_video.output_type",description:"<strong>output_type</strong> (<code>str</code>, defaults to <code>&quot;np&quot;</code>) &#x2014; Output type of the postprocessed <code>video</code> tensor.",name:"output_type"}],source:"https://github.com/huggingface/diffusers/blob/vr_10083/src/diffusers/video_processor.py#L89"}}),g=new te({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/video_processor.md"}}),{c(){n=c("meta"),I=r(),$=c("p"),D=r(),y(h.$$.fragment),N=r(),l=c("p"),l.innerHTML=B,C=r(),y(f.$$.fragment),E=r(),d=c("div"),y(m.$$.fragment),j=r(),v=c("p"),v.textContent=F,z=r(),a=c("div"),y(u.$$.fragment),G=r(),_=c("p"),_.textContent=J,A=r(),y(g.$$.fragment),H=r(),P=c("p"),this.h()},l(e){const o=oe("svelte-u9bgzb",document.head);n=p(o,"META",{name:!0,content:!0}),o.forEach(t),I=i(e),$=p(e,"P",{}),S($).forEach(t),D=i(e),w(h.$$.fragment,e),N=i(e),l=p(e,"P",{"data-svelte-h":!0}),R(l)!=="svelte-64r56a"&&(l.innerHTML=B),C=i(e),w(f.$$.fragment,e),E=i(e),d=p(e,"DIV",{class:!0});var O=S(d);w(m.$$.fragment,O),j=i(O),v=p(O,"P",{"data-svelte-h":!0}),R(v)!=="svelte-8z7kxl"&&(v.textContent=F),O.forEach(t),z=i(e),a=p(e,"DIV",{class:!0});var q=S(a);w(u.$$.fragment,q),G=i(q),_=p(q,"P",{"data-svelte-h":!0}),R(_)!=="svelte-36vyht"&&(_.textContent=J),q.forEach(t),A=i(e),w(g.$$.fragment,e),H=i(e),P=p(e,"P",{}),S(P).forEach(t),this.h()},h(){k(n,"name","hf:doc:metadata"),k(n,"content",re),k(d,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),k(a,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,o){b(document.head,n),s(e,I,o),s(e,$,o),s(e,D,o),x(h,e,o),s(e,N,o),s(e,l,o),s(e,C,o),x(f,e,o),s(e,E,o),s(e,d,o),x(m,d,null),b(d,j),b(d,v),s(e,z,o),s(e,a,o),x(u,a,null),b(a,G),b(a,_),s(e,A,o),x(g,e,o),s(e,H,o),s(e,P,o),M=!0},p:X,i(e){M||(V(h.$$.fragment,e),V(f.$$.fragment,e),V(m.$$.fragment,e),V(u.$$.fragment,e),V(g.$$.fragment,e),M=!0)},o(e){L(h.$$.fragment,e),L(f.$$.fragment,e),L(m.$$.fragment,e),L(u.$$.fragment,e),L(g.$$.fragment,e),M=!1},d(e){e&&(t(I),t($),t(D),t(N),t(l),t(C),t(E),t(d),t(z),t(a),t(A),t(H),t(P)),t(n),T(h,e),T(f,e),T(m),T(u),T(g,e)}}}
// Serialized table-of-contents metadata, injected into <meta hf:doc:metadata>.
const re='{"title":"Video Processor","local":"video-processor","sections":[{"title":"VideoProcessor","local":"diffusers.video_processor.VideoProcessor.preprocess_video","sections":[],"depth":2}],"depth":1}';
// Instance setup: reads the "fw" query param after hydration (framework toggle);
// this component has no reactive state, hence the empty context array.
function ie(U){return Y(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}
// SvelteComponent subclass wiring instance (ie) and fragment (se) factories.
class pe extends Z{constructor(n){super(),ee(this,n,ie,se,W,{})}}export{pe as component};

Xet Storage Details

Size:
6.4 kB
·
Xet hash:
7e0ddf933256bf855d1183e704a327153259b47e1ad39378f2333ba261aa1a99

Xet stores files efficiently by intelligently splitting them into unique chunks, which accelerates uploads and downloads. More info.