# LMSDiscreteScheduler

`LMSDiscreteScheduler` is a linear multistep scheduler for discrete beta schedules. The scheduler is ported from and created by [Katherine Crowson](https://github.com/crowsonkb/), and the original implementation can be found at [crowsonkb/k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181).

## LMSDiscreteScheduler

### class diffusers.LMSDiscreteScheduler

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L93)

( num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = 'linear', trained_betas: Union[np.ndarray, List[float], None] = None, use_karras_sigmas: Optional[bool] = False, use_exponential_sigmas: Optional[bool] = False, use_beta_sigmas: Optional[bool] = False, prediction_type: str = 'epsilon', timestep_spacing: str = 'linspace', steps_offset: int = 0 )

A linear multistep scheduler for discrete beta schedules.

This model inherits from [SchedulerMixin](/docs/diffusers/pr_10567/en/api/schedulers/overview#diffusers.SchedulerMixin) and [ConfigMixin](/docs/diffusers/pr_10567/en/api/configuration#diffusers.ConfigMixin). Check the superclass documentation for the generic methods the library implements for all schedulers, such as loading and saving.

**Parameters**

- **num_train_timesteps** (`int`, defaults to 1000) — The number of diffusion steps used to train the model.
- **beta_start** (`float`, defaults to 0.0001) — The starting `beta` value of inference.
- **beta_end** (`float`, defaults to 0.02) — The final `beta` value.
- **beta_schedule** (`str`, defaults to `"linear"`) — The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear` or `scaled_linear`.
- **trained_betas** (`np.ndarray`, *optional*) — Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- **use_karras_sigmas** (`bool`, *optional*, defaults to `False`) — Whether to use Karras sigmas for the step sizes in the noise schedule during sampling. If `True`, the sigmas are determined according to a sequence of noise levels {σi}.
- **use_exponential_sigmas** (`bool`, *optional*, defaults to `False`) — Whether to use exponential sigmas for the step sizes in the noise schedule during sampling.
- **use_beta_sigmas** (`bool`, *optional*, defaults to `False`) — Whether to use beta sigmas for the step sizes in the noise schedule during sampling. Refer to [Beta Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information.
- **prediction_type** (`str`, *optional*, defaults to `"epsilon"`) — Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample), or `v_prediction` (see section 2.4 of the [Imagen Video](https://imagen.research.google/video/paper.pdf) paper).
- **timestep_spacing** (`str`, defaults to `"linspace"`) — The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- **steps_offset** (`int`, defaults to 0) — An offset added to the inference steps, as required by some model families.
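For orientation, a minimal sketch of the most common usage: swapping `LMSDiscreteScheduler` into an existing pipeline with `from_config`, so the beta schedule and timestep settings match the checkpoint. The checkpoint ID below is an illustrative assumption, not part of this reference.

```python
# Minimal sketch: use LMSDiscreteScheduler with an existing pipeline.
# The checkpoint ID is an assumption for illustration.
import torch
from diffusers import LMSDiscreteScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Rebuild the scheduler from the pipeline's existing config so that
# beta_start/beta_end/beta_schedule match the checkpoint's training setup.
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]
```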
#### get_lms_coefficient

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L241)

( order, t, current_order )

Compute the linear multistep coefficient.

**Parameters**

- **order** — The order of the linear multistep method.
- **t** — The index of the current timestep.
- **current_order** — The order of the coefficient currently being computed.

#### scale_model_input

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L217)

( sample: torch.Tensor, timestep: Union[float, torch.Tensor] ) → `torch.Tensor`

Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep.

**Parameters**

- **sample** (`torch.Tensor`) — The input sample.
- **timestep** (`float` or `torch.Tensor`) — The current timestep in the diffusion chain.

**Returns** — `torch.Tensor`: A scaled input sample.
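Because `scale_model_input` is part of the shared scheduler interface, a denoising loop can stay scheduler-agnostic by always scaling the latents before the model call. A minimal sketch, assuming `scheduler`, `sample`, and `timestep` already exist:

```python
# Sketch: scale the latent input before each model call.
# For LMSDiscreteScheduler this divides by (sigma**2 + 1) ** 0.5, matching the
# k-diffusion parameterization; for schedulers that need no scaling the call
# is a no-op, so the same loop works with any scheduler.
scaled = scheduler.scale_model_input(sample, timestep)
```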
#### set_begin_index

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L207)

( begin_index: int = 0 )

Sets the begin index for the scheduler. This function should be run from a pipeline before the inference.

**Parameters**

- **begin_index** (`int`) — The begin index for the scheduler.

#### set_timesteps

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L263)

( num_inference_steps: int, device: Union[str, torch.device] = None )

Sets the discrete timesteps used for the diffusion chain (to be run before inference).

**Parameters**

- **num_inference_steps** (`int`) — The number of diffusion steps used when generating samples with a pre-trained model.
- **device** (`str` or `torch.device`, *optional*) — The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
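After `set_timesteps`, the scheduler exposes the interpolated schedule as public state. A small sketch to inspect it; the shape comments reflect my reading of the scheduler's behavior (interpolated fractional timesteps, plus a sigma table with a trailing 0.0) and should be treated as an assumption:

```python
from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=25)

# 25 timesteps spaced across the 1000 training steps, and the matching noise
# levels used by the multistep update (one extra trailing sigma of 0.0).
print(scheduler.timesteps.shape)  # expected: torch.Size([25])
print(scheduler.sigmas.shape)     # expected: torch.Size([26])
```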
#### step

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L437)

( model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, order: int = 4, return_dict: bool = True ) → [SchedulerOutput](/docs/diffusers/pr_10567/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput) or `tuple`

Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise).

**Parameters**

- **model_output** (`torch.Tensor`) — The direct output from the learned diffusion model.
- **timestep** (`float` or `torch.Tensor`) — The current discrete timestep in the diffusion chain.
- **sample** (`torch.Tensor`) — A current instance of a sample created by the diffusion process.
- **order** (`int`, defaults to 4) — The order of the linear multistep method.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a [SchedulerOutput](/docs/diffusers/pr_10567/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput) or tuple.

**Returns** — [SchedulerOutput](/docs/diffusers/pr_10567/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput) or `tuple`: If `return_dict` is `True`, a `SchedulerOutput` is returned; otherwise a tuple is returned where the first element is the sample tensor.
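Putting the pieces together, here is a sketch of a hand-rolled denoising loop under stated assumptions: a `unet` with the usual `(sample, timestep, encoder_hidden_states)` call signature and pre-prepared `latents` and `prompt_embeds` are assumed to exist, and the beta values shown are illustrative (they mirror common Stable Diffusion configs, not a requirement of this scheduler):

```python
import torch
from diffusers import LMSDiscreteScheduler

# Illustrative configuration; real pipelines load this from the checkpoint.
scheduler = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
)

scheduler.set_timesteps(num_inference_steps=25, device="cuda")

# Scale the initial Gaussian noise to the scheduler's starting noise level.
latents = latents * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    latent_in = scheduler.scale_model_input(latents, t)
    with torch.no_grad():
        noise_pred = unet(latent_in, t, encoder_hidden_states=prompt_embeds).sample
    # `step` applies the (by default order-4) linear multistep update.
    latents = scheduler.step(noise_pred, t, latents, order=4).prev_sample
```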
## LMSDiscreteSchedulerOutput

### class diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteSchedulerOutput

[source](https://github.com/huggingface/diffusers/blob/vr_10567/src/diffusers/schedulers/scheduling_lms_discrete.py#L29)

( prev_sample: torch.Tensor, pred_original_sample: Optional[torch.Tensor] = None )

Output class for the scheduler's `step` function output.

**Parameters**

- **prev_sample** (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images) — Computed sample `(x_{t-1})` of the previous timestep. `prev_sample` should be used as the next model input in the denoising loop.
- **pred_original_sample** (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images, *optional*) — The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance.