Buckets:

rtrm's picture
download
raw
35.9 kB
import{s as Et,o as Ht,n as kt}from"../chunks/scheduler.8c3d61f6.js";import{S as qt,i as Nt,g as i,s as r,r as u,A as Vt,h as d,f as s,c as n,j as S,u as m,x as c,k as b,y as t,a as p,v as f,d as h,t as g,w as _}from"../chunks/index.da70eac4.js";import{T as zt}from"../chunks/Tip.1d9b8c37.js";import{D as x}from"../chunks/Docstring.6b390b9a.js";import{H as je,E as At}from"../chunks/EditOnGithub.1e64e623.js";function Ft(be){let v,k=`The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise
prediction and data prediction models.`;return{c(){v=i("p"),v.textContent=k},l($){v=d($,"P",{"data-svelte-h":!0}),c(v)!=="svelte-95n5s"&&(v.textContent=k)},m($,ne){p($,v,ne)},p:kt,d($){$&&s(v)}}}function Wt(be){let v,k,$,ne,q,xe,N,ht='<code>DPMSolverSinglestepScheduler</code> is a single step scheduler from <a href="https://huggingface.co/papers/2206.00927" rel="nofollow">DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps</a> and <a href="https://huggingface.co/papers/2211.01095" rel="nofollow">DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models</a> by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu.',$e,V,gt=`DPMSolver (and the improved version DPMSolver++) is a fast dedicated high-order solver for diffusion ODEs with convergence order guarantee. Empirically, DPMSolver sampling with only 20 steps can generate high-quality
samples, and it can generate quite good samples even in 10 steps.`,De,z,_t='The original implementation can be found at <a href="https://github.com/LuChengTHU/dpm-solver" rel="nofollow">LuChengTHU/dpm-solver</a>.',Te,A,ye,F,vt="It is recommended to set <code>solver_order</code> to 2 for guide sampling, and <code>solver_order=3</code> for unconditional sampling.",Pe,W,St=`Dynamic thresholding from <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen</a> is supported, and for pixel-space
diffusion models, you can set both <code>algorithm_type=&quot;dpmsolver++&quot;</code> and <code>thresholding=True</code> to use dynamic
thresholding. This thresholding method is unsuitable for latent-space diffusion models such as
Stable Diffusion.`,Me,U,we,l,j,Be,oe,bt="<code>DPMSolverSinglestepScheduler</code> is a fast dedicated high-order solver for diffusion ODEs.",Ge,ie,xt=`This model inherits from <a href="/docs/diffusers/pr_10101/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a> and <a href="/docs/diffusers/pr_10101/en/api/configuration#diffusers.ConfigMixin">ConfigMixin</a>. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.`,Ye,D,B,Je,de,$t=`Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is
designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an
integral of the data prediction model.`,Re,y,Ze,P,G,Ke,le,Dt="One step for the first-order DPMSolver (equivalent to DDIM).",Qe,M,Y,Xe,ae,Tt="Computes the solver order at each time step.",et,w,J,tt,pe,yt=`Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.`,st,C,R,rt,ce,Pt="Sets the begin index for the scheduler. This function should be run from pipeline before the inference.",nt,L,Z,ot,ue,Mt="Sets the discrete timesteps used for the diffusion chain (to be run before inference).",it,O,K,dt,me,wt=`One step for the second-order singlestep DPMSolver that computes the solution at time <code>prev_timestep</code> from the
time <code>timestep_list[-2]</code>.`,lt,I,Q,at,fe,Ct=`One step for the third-order singlestep DPMSolver that computes the solution at time <code>prev_timestep</code> from the
time <code>timestep_list[-3]</code>.`,pt,E,X,ct,he,Lt="One step for the singlestep DPMSolver.",ut,H,ee,mt,ge,Ot=`Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
the singlestep DPMSolver.`,Ce,te,Le,T,se,ft,_e,It="Base class for the output of a scheduler’s <code>step</code> function.",Oe,re,Ie,Se,Ee;return q=new je({props:{title:"DPMSolverSinglestepScheduler",local:"dpmsolversinglestepscheduler",headingTag:"h1"}}),A=new je({props:{title:"Tips",local:"tips",headingTag:"h2"}}),U=new je({props:{title:"DPMSolverSinglestepScheduler",local:"diffusers.DPMSolverSinglestepScheduler",headingTag:"h2"}}),j=new x({props:{name:"class diffusers.DPMSolverSinglestepScheduler",anchor:"diffusers.DPMSolverSinglestepScheduler",parameters:[{name:"num_train_timesteps",val:": int = 1000"},{name:"beta_start",val:": float = 0.0001"},{name:"beta_end",val:": float = 0.02"},{name:"beta_schedule",val:": str = 'linear'"},{name:"trained_betas",val:": typing.Optional[numpy.ndarray] = None"},{name:"solver_order",val:": int = 2"},{name:"prediction_type",val:": str = 'epsilon'"},{name:"thresholding",val:": bool = False"},{name:"dynamic_thresholding_ratio",val:": float = 0.995"},{name:"sample_max_value",val:": float = 1.0"},{name:"algorithm_type",val:": str = 'dpmsolver++'"},{name:"solver_type",val:": str = 'midpoint'"},{name:"lower_order_final",val:": bool = False"},{name:"use_karras_sigmas",val:": typing.Optional[bool] = False"},{name:"use_exponential_sigmas",val:": typing.Optional[bool] = False"},{name:"use_beta_sigmas",val:": typing.Optional[bool] = False"},{name:"final_sigmas_type",val:": typing.Optional[str] = 'zero'"},{name:"lambda_min_clipped",val:": float = -inf"},{name:"variance_type",val:": typing.Optional[str] = None"}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.num_train_timesteps",description:`<strong>num_train_timesteps</strong> (<code>int</code>, defaults to 1000) &#x2014;
The number of diffusion steps to train the model.`,name:"num_train_timesteps"},{anchor:"diffusers.DPMSolverSinglestepScheduler.beta_start",description:`<strong>beta_start</strong> (<code>float</code>, defaults to 0.0001) &#x2014;
The starting <code>beta</code> value of inference.`,name:"beta_start"},{anchor:"diffusers.DPMSolverSinglestepScheduler.beta_end",description:`<strong>beta_end</strong> (<code>float</code>, defaults to 0.02) &#x2014;
The final <code>beta</code> value.`,name:"beta_end"},{anchor:"diffusers.DPMSolverSinglestepScheduler.beta_schedule",description:`<strong>beta_schedule</strong> (<code>str</code>, defaults to <code>&quot;linear&quot;</code>) &#x2014;
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
<code>linear</code>, <code>scaled_linear</code>, or <code>squaredcos_cap_v2</code>.`,name:"beta_schedule"},{anchor:"diffusers.DPMSolverSinglestepScheduler.trained_betas",description:`<strong>trained_betas</strong> (<code>np.ndarray</code>, <em>optional</em>) &#x2014;
Pass an array of betas directly to the constructor to bypass <code>beta_start</code> and <code>beta_end</code>.`,name:"trained_betas"},{anchor:"diffusers.DPMSolverSinglestepScheduler.solver_order",description:`<strong>solver_order</strong> (<code>int</code>, defaults to 2) &#x2014;
The DPMSolver order which can be <code>1</code> or <code>2</code> or <code>3</code>. It is recommended to use <code>solver_order=2</code> for guided
sampling, and <code>solver_order=3</code> for unconditional sampling.`,name:"solver_order"},{anchor:"diffusers.DPMSolverSinglestepScheduler.prediction_type",description:`<strong>prediction_type</strong> (<code>str</code>, defaults to <code>epsilon</code>, <em>optional</em>) &#x2014;
Prediction type of the scheduler function; can be <code>epsilon</code> (predicts the noise of the diffusion process),
<code>sample</code> (directly predicts the noisy sample<code>) or </code>v_prediction\` (see section 2.4 of <a href="https://imagen.research.google/video/paper.pdf" rel="nofollow">Imagen
Video</a> paper).`,name:"prediction_type"},{anchor:"diffusers.DPMSolverSinglestepScheduler.thresholding",description:`<strong>thresholding</strong> (<code>bool</code>, defaults to <code>False</code>) &#x2014;
Whether to use the &#x201C;dynamic thresholding&#x201D; method. This is unsuitable for latent-space diffusion models such
as Stable Diffusion.`,name:"thresholding"},{anchor:"diffusers.DPMSolverSinglestepScheduler.dynamic_thresholding_ratio",description:`<strong>dynamic_thresholding_ratio</strong> (<code>float</code>, defaults to 0.995) &#x2014;
The ratio for the dynamic thresholding method. Valid only when <code>thresholding=True</code>.`,name:"dynamic_thresholding_ratio"},{anchor:"diffusers.DPMSolverSinglestepScheduler.sample_max_value",description:`<strong>sample_max_value</strong> (<code>float</code>, defaults to 1.0) &#x2014;
The threshold value for dynamic thresholding. Valid only when <code>thresholding=True</code> and
<code>algorithm_type=&quot;dpmsolver++&quot;</code>.`,name:"sample_max_value"},{anchor:"diffusers.DPMSolverSinglestepScheduler.algorithm_type",description:`<strong>algorithm_type</strong> (<code>str</code>, defaults to <code>dpmsolver++</code>) &#x2014;
Algorithm type for the solver; can be <code>dpmsolver</code> or <code>dpmsolver++</code> or <code>sde-dpmsolver++</code>. The <code>dpmsolver</code>
type implements the algorithms in the <a href="https://huggingface.co/papers/2206.00927" rel="nofollow">DPMSolver</a> paper, and the
<code>dpmsolver++</code> type implements the algorithms in the <a href="https://huggingface.co/papers/2211.01095" rel="nofollow">DPMSolver++</a>
paper. It is recommended to use <code>dpmsolver++</code> or <code>sde-dpmsolver++</code> with <code>solver_order=2</code> for guided
sampling like in Stable Diffusion.`,name:"algorithm_type"},{anchor:"diffusers.DPMSolverSinglestepScheduler.solver_type",description:`<strong>solver_type</strong> (<code>str</code>, defaults to <code>midpoint</code>) &#x2014;
Solver type for the second-order solver; can be <code>midpoint</code> or <code>heun</code>. The solver type slightly affects the
sample quality, especially for a small number of steps. It is recommended to use <code>midpoint</code> solvers.`,name:"solver_type"},{anchor:"diffusers.DPMSolverSinglestepScheduler.lower_order_final",description:`<strong>lower_order_final</strong> (<code>bool</code>, defaults to <code>True</code>) &#x2014;
Whether to use lower-order solvers in the final steps. Only valid for &lt; 15 inference steps. This can
stabilize the sampling of DPMSolver for steps &lt; 15, especially for steps &lt;= 10.`,name:"lower_order_final"},{anchor:"diffusers.DPMSolverSinglestepScheduler.use_karras_sigmas",description:`<strong>use_karras_sigmas</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If <code>True</code>,
the sigmas are determined according to a sequence of noise levels {&#x3C3;i}.`,name:"use_karras_sigmas"},{anchor:"diffusers.DPMSolverSinglestepScheduler.use_exponential_sigmas",description:`<strong>use_exponential_sigmas</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process.`,name:"use_exponential_sigmas"},{anchor:"diffusers.DPMSolverSinglestepScheduler.use_beta_sigmas",description:`<strong>use_beta_sigmas</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
Whether to use beta sigmas for step sizes in the noise schedule during the sampling process. Refer to <a href="https://huggingface.co/papers/2407.12173" rel="nofollow">Beta
Sampling is All You Need</a> for more information.`,name:"use_beta_sigmas"},{anchor:"diffusers.DPMSolverSinglestepScheduler.final_sigmas_type",description:`<strong>final_sigmas_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;zero&quot;</code>) &#x2014;
The final <code>sigma</code> value for the noise schedule during the sampling process. If <code>&quot;sigma_min&quot;</code>, the final
sigma is the same as the last sigma in the training schedule. If <code>zero</code>, the final sigma is set to 0.`,name:"final_sigmas_type"},{anchor:"diffusers.DPMSolverSinglestepScheduler.lambda_min_clipped",description:`<strong>lambda_min_clipped</strong> (<code>float</code>, defaults to <code>-inf</code>) &#x2014;
Clipping threshold for the minimum value of <code>lambda(t)</code> for numerical stability. This is critical for the
cosine (<code>squaredcos_cap_v2</code>) noise schedule.`,name:"lambda_min_clipped"},{anchor:"diffusers.DPMSolverSinglestepScheduler.variance_type",description:`<strong>variance_type</strong> (<code>str</code>, <em>optional</em>) &#x2014;
Set to &#x201C;learned&#x201D; or &#x201C;learned_range&#x201D; for diffusion models that predict variance. If set, the model&#x2019;s output
contains the predicted Gaussian variance.`,name:"variance_type"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L80"}}),B=new x({props:{name:"convert_model_output",anchor:"diffusers.DPMSolverSinglestepScheduler.convert_model_output",parameters:[{name:"model_output",val:": Tensor"},{name:"*args",val:""},{name:"sample",val:": Tensor = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.convert_model_output.model_output",description:`<strong>model_output</strong> (<code>torch.Tensor</code>) &#x2014;
The direct output from the learned diffusion model.`,name:"model_output"},{anchor:"diffusers.DPMSolverSinglestepScheduler.convert_model_output.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L542",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The converted model output.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),y=new zt({props:{$$slots:{default:[Ft]},$$scope:{ctx:be}}}),G=new x({props:{name:"dpm_solver_first_order_update",anchor:"diffusers.DPMSolverSinglestepScheduler.dpm_solver_first_order_update",parameters:[{name:"model_output",val:": Tensor"},{name:"*args",val:""},{name:"sample",val:": Tensor = None"},{name:"noise",val:": typing.Optional[torch.Tensor] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.dpm_solver_first_order_update.model_output",description:`<strong>model_output</strong> (<code>torch.Tensor</code>) &#x2014;
The direct output from the learned diffusion model.`,name:"model_output"},{anchor:"diffusers.DPMSolverSinglestepScheduler.dpm_solver_first_order_update.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.dpm_solver_first_order_update.prev_timestep",description:`<strong>prev_timestep</strong> (<code>int</code>) &#x2014;
The previous discrete timestep in the diffusion chain.`,name:"prev_timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.dpm_solver_first_order_update.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L639",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The sample tensor at the previous timestep.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),Y=new x({props:{name:"get_order_list",anchor:"diffusers.DPMSolverSinglestepScheduler.get_order_list",parameters:[{name:"num_inference_steps",val:": int"}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.get_order_list.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>) &#x2014;
The number of diffusion steps used when generating samples with a pre-trained model.`,name:"num_inference_steps"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L233"}}),J=new x({props:{name:"scale_model_input",anchor:"diffusers.DPMSolverSinglestepScheduler.scale_model_input",parameters:[{name:"sample",val:": Tensor"},{name:"*args",val:""},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.scale_model_input.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
The input sample.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L1103",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>A scaled input sample.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),R=new x({props:{name:"set_begin_index",anchor:"diffusers.DPMSolverSinglestepScheduler.set_begin_index",parameters:[{name:"begin_index",val:": int = 0"}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.set_begin_index.begin_index",description:`<strong>begin_index</strong> (<code>int</code>) &#x2014;
The begin index for the scheduler.`,name:"begin_index"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L288"}}),Z=new x({props:{name:"set_timesteps",anchor:"diffusers.DPMSolverSinglestepScheduler.set_timesteps",parameters:[{name:"num_inference_steps",val:": int = None"},{name:"device",val:": typing.Union[str, torch.device] = None"},{name:"timesteps",val:": typing.Optional[typing.List[int]] = None"}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.set_timesteps.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>) &#x2014;
The number of diffusion steps used when generating samples with a pre-trained model.`,name:"num_inference_steps"},{anchor:"diffusers.DPMSolverSinglestepScheduler.set_timesteps.device",description:`<strong>device</strong> (<code>str</code> or <code>torch.device</code>, <em>optional</em>) &#x2014;
The device to which the timesteps should be moved to. If <code>None</code>, the timesteps are not moved.`,name:"device"},{anchor:"diffusers.DPMSolverSinglestepScheduler.set_timesteps.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps used to support arbitrary spacing between timesteps. If <code>None</code>, then the default
timestep spacing strategy of equal spacing between timesteps schedule is used. If <code>timesteps</code> is
passed, <code>num_inference_steps</code> must be <code>None</code>.`,name:"timesteps"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L298"}}),K=new x({props:{name:"singlestep_dpm_solver_second_order_update",anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_second_order_update",parameters:[{name:"model_output_list",val:": typing.List[torch.Tensor]"},{name:"*args",val:""},{name:"sample",val:": Tensor = None"},{name:"noise",val:": typing.Optional[torch.Tensor] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_second_order_update.model_output_list",description:`<strong>model_output_list</strong> (<code>List[torch.Tensor]</code>) &#x2014;
The direct outputs from learned diffusion model at current and latter timesteps.`,name:"model_output_list"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_second_order_update.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current and latter discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_second_order_update.prev_timestep",description:`<strong>prev_timestep</strong> (<code>int</code>) &#x2014;
The previous discrete timestep in the diffusion chain.`,name:"prev_timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_second_order_update.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L703",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The sample tensor at the previous timestep.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),Q=new x({props:{name:"singlestep_dpm_solver_third_order_update",anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_third_order_update",parameters:[{name:"model_output_list",val:": typing.List[torch.Tensor]"},{name:"*args",val:""},{name:"sample",val:": Tensor = None"},{name:"noise",val:": typing.Optional[torch.Tensor] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_third_order_update.model_output_list",description:`<strong>model_output_list</strong> (<code>List[torch.Tensor]</code>) &#x2014;
The direct outputs from learned diffusion model at current and latter timesteps.`,name:"model_output_list"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_third_order_update.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current and latter discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_third_order_update.prev_timestep",description:`<strong>prev_timestep</strong> (<code>int</code>) &#x2014;
The previous discrete timestep in the diffusion chain.`,name:"prev_timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_third_order_update.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by diffusion process.`,name:"sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L814",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The sample tensor at the previous timestep.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),X=new x({props:{name:"singlestep_dpm_solver_update",anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update",parameters:[{name:"model_output_list",val:": typing.List[torch.Tensor]"},{name:"*args",val:""},{name:"sample",val:": Tensor = None"},{name:"order",val:": int = None"},{name:"noise",val:": typing.Optional[torch.Tensor] = None"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update.model_output_list",description:`<strong>model_output_list</strong> (<code>List[torch.Tensor]</code>) &#x2014;
The direct outputs from learned diffusion model at current and latter timesteps.`,name:"model_output_list"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current and latter discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update.prev_timestep",description:`<strong>prev_timestep</strong> (<code>int</code>) &#x2014;
The previous discrete timestep in the diffusion chain.`,name:"prev_timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by diffusion process.`,name:"sample"},{anchor:"diffusers.DPMSolverSinglestepScheduler.singlestep_dpm_solver_update.order",description:`<strong>order</strong> (<code>int</code>) &#x2014;
The solver order at this step.`,name:"order"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L936",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The sample tensor at the previous timestep.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),ee=new x({props:{name:"step",anchor:"diffusers.DPMSolverSinglestepScheduler.step",parameters:[{name:"model_output",val:": Tensor"},{name:"timestep",val:": typing.Union[int, torch.Tensor]"},{name:"sample",val:": Tensor"},{name:"generator",val:" = None"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.DPMSolverSinglestepScheduler.step.model_output",description:`<strong>model_output</strong> (<code>torch.Tensor</code>) &#x2014;
The direct output from learned diffusion model.`,name:"model_output"},{anchor:"diffusers.DPMSolverSinglestepScheduler.step.timestep",description:`<strong>timestep</strong> (<code>int</code>) &#x2014;
The current discrete timestep in the diffusion chain.`,name:"timestep"},{anchor:"diffusers.DPMSolverSinglestepScheduler.step.sample",description:`<strong>sample</strong> (<code>torch.Tensor</code>) &#x2014;
A current instance of a sample created by the diffusion process.`,name:"sample"},{anchor:"diffusers.DPMSolverSinglestepScheduler.step.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>) &#x2014;
Whether or not to return a <a href="/docs/diffusers/pr_10101/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput">SchedulerOutput</a> or <code>tuple</code>.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py#L1032",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If return_dict is <code>True</code>, <a
href="/docs/diffusers/pr_10101/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput"
>SchedulerOutput</a> is returned, otherwise a
tuple is returned where the first element is the sample tensor.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><a
href="/docs/diffusers/pr_10101/en/api/schedulers/multistep_dpm_solver#diffusers.schedulers.scheduling_utils.SchedulerOutput"
>SchedulerOutput</a> or <code>tuple</code></p>
`}}),te=new je({props:{title:"SchedulerOutput",local:"diffusers.schedulers.scheduling_utils.SchedulerOutput",headingTag:"h2"}}),se=new x({props:{name:"class diffusers.schedulers.scheduling_utils.SchedulerOutput",anchor:"diffusers.schedulers.scheduling_utils.SchedulerOutput",parameters:[{name:"prev_sample",val:": Tensor"}],parametersDescription:[{anchor:"diffusers.schedulers.scheduling_utils.SchedulerOutput.prev_sample",description:`<strong>prev_sample</strong> (<code>torch.Tensor</code> of shape <code>(batch_size, num_channels, height, width)</code> for images) &#x2014;
Computed sample <code>(x_{t-1})</code> of previous timestep. <code>prev_sample</code> should be used as next model input in the
denoising loop.`,name:"prev_sample"}],source:"https://github.com/huggingface/diffusers/blob/vr_10101/src/diffusers/schedulers/scheduling_utils.py#L60"}}),re=new At({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/schedulers/singlestep_dpm_solver.md"}}),{c(){v=i("meta"),k=r(),$=i("p"),ne=r(),u(q.$$.fragment),xe=r(),N=i("p"),N.innerHTML=ht,$e=r(),V=i("p"),V.textContent=gt,De=r(),z=i("p"),z.innerHTML=_t,Te=r(),u(A.$$.fragment),ye=r(),F=i("p"),F.innerHTML=vt,Pe=r(),W=i("p"),W.innerHTML=St,Me=r(),u(U.$$.fragment),we=r(),l=i("div"),u(j.$$.fragment),Be=r(),oe=i("p"),oe.innerHTML=bt,Ge=r(),ie=i("p"),ie.innerHTML=xt,Ye=r(),D=i("div"),u(B.$$.fragment),Je=r(),de=i("p"),de.textContent=$t,Re=r(),u(y.$$.fragment),Ze=r(),P=i("div"),u(G.$$.fragment),Ke=r(),le=i("p"),le.textContent=Dt,Qe=r(),M=i("div"),u(Y.$$.fragment),Xe=r(),ae=i("p"),ae.textContent=Tt,et=r(),w=i("div"),u(J.$$.fragment),tt=r(),pe=i("p"),pe.textContent=yt,st=r(),C=i("div"),u(R.$$.fragment),rt=r(),ce=i("p"),ce.textContent=Pt,nt=r(),L=i("div"),u(Z.$$.fragment),ot=r(),ue=i("p"),ue.textContent=Mt,it=r(),O=i("div"),u(K.$$.fragment),dt=r(),me=i("p"),me.innerHTML=wt,lt=r(),I=i("div"),u(Q.$$.fragment),at=r(),fe=i("p"),fe.innerHTML=Ct,pt=r(),E=i("div"),u(X.$$.fragment),ct=r(),he=i("p"),he.textContent=Lt,ut=r(),H=i("div"),u(ee.$$.fragment),mt=r(),ge=i("p"),ge.textContent=Ot,Ce=r(),u(te.$$.fragment),Le=r(),T=i("div"),u(se.$$.fragment),ft=r(),_e=i("p"),_e.innerHTML=It,Oe=r(),u(re.$$.fragment),Ie=r(),Se=i("p"),this.h()},l(e){const 
o=Vt("svelte-u9bgzb",document.head);v=d(o,"META",{name:!0,content:!0}),o.forEach(s),k=n(e),$=d(e,"P",{}),S($).forEach(s),ne=n(e),m(q.$$.fragment,e),xe=n(e),N=d(e,"P",{"data-svelte-h":!0}),c(N)!=="svelte-zw7rt6"&&(N.innerHTML=ht),$e=n(e),V=d(e,"P",{"data-svelte-h":!0}),c(V)!=="svelte-l6rv24"&&(V.textContent=gt),De=n(e),z=d(e,"P",{"data-svelte-h":!0}),c(z)!=="svelte-1mk5p6f"&&(z.innerHTML=_t),Te=n(e),m(A.$$.fragment,e),ye=n(e),F=d(e,"P",{"data-svelte-h":!0}),c(F)!=="svelte-pt2glg"&&(F.innerHTML=vt),Pe=n(e),W=d(e,"P",{"data-svelte-h":!0}),c(W)!=="svelte-18sd27c"&&(W.innerHTML=St),Me=n(e),m(U.$$.fragment,e),we=n(e),l=d(e,"DIV",{class:!0});var a=S(l);m(j.$$.fragment,a),Be=n(a),oe=d(a,"P",{"data-svelte-h":!0}),c(oe)!=="svelte-1g2jbmp"&&(oe.innerHTML=bt),Ge=n(a),ie=d(a,"P",{"data-svelte-h":!0}),c(ie)!=="svelte-kgurhv"&&(ie.innerHTML=xt),Ye=n(a),D=d(a,"DIV",{class:!0});var ve=S(D);m(B.$$.fragment,ve),Je=n(ve),de=d(ve,"P",{"data-svelte-h":!0}),c(de)!=="svelte-1st02i8"&&(de.textContent=$t),Re=n(ve),m(y.$$.fragment,ve),ve.forEach(s),Ze=n(a),P=d(a,"DIV",{class:!0});var He=S(P);m(G.$$.fragment,He),Ke=n(He),le=d(He,"P",{"data-svelte-h":!0}),c(le)!=="svelte-4nw43d"&&(le.textContent=Dt),He.forEach(s),Qe=n(a),M=d(a,"DIV",{class:!0});var ke=S(M);m(Y.$$.fragment,ke),Xe=n(ke),ae=d(ke,"P",{"data-svelte-h":!0}),c(ae)!=="svelte-13tpuy9"&&(ae.textContent=Tt),ke.forEach(s),et=n(a),w=d(a,"DIV",{class:!0});var qe=S(w);m(J.$$.fragment,qe),tt=n(qe),pe=d(qe,"P",{"data-svelte-h":!0}),c(pe)!=="svelte-1rkfgpx"&&(pe.textContent=yt),qe.forEach(s),st=n(a),C=d(a,"DIV",{class:!0});var Ne=S(C);m(R.$$.fragment,Ne),rt=n(Ne),ce=d(Ne,"P",{"data-svelte-h":!0}),c(ce)!=="svelte-1k141rk"&&(ce.textContent=Pt),Ne.forEach(s),nt=n(a),L=d(a,"DIV",{class:!0});var Ve=S(L);m(Z.$$.fragment,Ve),ot=n(Ve),ue=d(Ve,"P",{"data-svelte-h":!0}),c(ue)!=="svelte-1vzm9q"&&(ue.textContent=Mt),Ve.forEach(s),it=n(a),O=d(a,"DIV",{class:!0});var 
ze=S(O);m(K.$$.fragment,ze),dt=n(ze),me=d(ze,"P",{"data-svelte-h":!0}),c(me)!=="svelte-1rmndna"&&(me.innerHTML=wt),ze.forEach(s),lt=n(a),I=d(a,"DIV",{class:!0});var Ae=S(I);m(Q.$$.fragment,Ae),at=n(Ae),fe=d(Ae,"P",{"data-svelte-h":!0}),c(fe)!=="svelte-17kajtk"&&(fe.innerHTML=Ct),Ae.forEach(s),pt=n(a),E=d(a,"DIV",{class:!0});var Fe=S(E);m(X.$$.fragment,Fe),ct=n(Fe),he=d(Fe,"P",{"data-svelte-h":!0}),c(he)!=="svelte-zgpbva"&&(he.textContent=Lt),Fe.forEach(s),ut=n(a),H=d(a,"DIV",{class:!0});var We=S(H);m(ee.$$.fragment,We),mt=n(We),ge=d(We,"P",{"data-svelte-h":!0}),c(ge)!=="svelte-7earhm"&&(ge.textContent=Ot),We.forEach(s),a.forEach(s),Ce=n(e),m(te.$$.fragment,e),Le=n(e),T=d(e,"DIV",{class:!0});var Ue=S(T);m(se.$$.fragment,Ue),ft=n(Ue),_e=d(Ue,"P",{"data-svelte-h":!0}),c(_e)!=="svelte-6ojmkw"&&(_e.innerHTML=It),Ue.forEach(s),Oe=n(e),m(re.$$.fragment,e),Ie=n(e),Se=d(e,"P",{}),S(Se).forEach(s),this.h()},h(){b(v,"name","hf:doc:metadata"),b(v,"content",Ut),b(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(l,"class","docstring border-l-2 border-t-2 
pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,o){t(document.head,v),p(e,k,o),p(e,$,o),p(e,ne,o),f(q,e,o),p(e,xe,o),p(e,N,o),p(e,$e,o),p(e,V,o),p(e,De,o),p(e,z,o),p(e,Te,o),f(A,e,o),p(e,ye,o),p(e,F,o),p(e,Pe,o),p(e,W,o),p(e,Me,o),f(U,e,o),p(e,we,o),p(e,l,o),f(j,l,null),t(l,Be),t(l,oe),t(l,Ge),t(l,ie),t(l,Ye),t(l,D),f(B,D,null),t(D,Je),t(D,de),t(D,Re),f(y,D,null),t(l,Ze),t(l,P),f(G,P,null),t(P,Ke),t(P,le),t(l,Qe),t(l,M),f(Y,M,null),t(M,Xe),t(M,ae),t(l,et),t(l,w),f(J,w,null),t(w,tt),t(w,pe),t(l,st),t(l,C),f(R,C,null),t(C,rt),t(C,ce),t(l,nt),t(l,L),f(Z,L,null),t(L,ot),t(L,ue),t(l,it),t(l,O),f(K,O,null),t(O,dt),t(O,me),t(l,lt),t(l,I),f(Q,I,null),t(I,at),t(I,fe),t(l,pt),t(l,E),f(X,E,null),t(E,ct),t(E,he),t(l,ut),t(l,H),f(ee,H,null),t(H,mt),t(H,ge),p(e,Ce,o),f(te,e,o),p(e,Le,o),p(e,T,o),f(se,T,null),t(T,ft),t(T,_e),p(e,Oe,o),f(re,e,o),p(e,Ie,o),p(e,Se,o),Ee=!0},p(e,[o]){const a={};o&2&&(a.$$scope={dirty:o,ctx:e}),y.$set(a)},i(e){Ee||(h(q.$$.fragment,e),h(A.$$.fragment,e),h(U.$$.fragment,e),h(j.$$.fragment,e),h(B.$$.fragment,e),h(y.$$.fragment,e),h(G.$$.fragment,e),h(Y.$$.fragment,e),h(J.$$.fragment,e),h(R.$$.fragment,e),h(Z.$$.fragment,e),h(K.$$.fragment,e),h(Q.$$.fragment,e),h(X.$$.fragment,e),h(ee.$$.fragment,e),h(te.$$.fragment,e),h(se.$$.fragment,e),h(re.$$.fragment,e),Ee=!0)},o(e){g(q.$$.fragment,e),g(A.$$.fragment,e),g(U.$$.fragment,e),g(j.$$.fragment,e),g(B.$$.fragment,e),g(y.$$.fragment,e),g(G.$$.fragment,e),g(Y.$$.fragment,e),g(J.$$.fragment,e),g(R.$$.fragment,e),g(Z.$$.fragment,e),g(K.$$.fragment,e),g(Q.$$.fragment,e),g(X.$$.fragment,e),g(ee.$$.fragment,e),g(te.$$.fragment,e),g(se.$$.fragment,e),g(re.$$.fragment,e),Ee=!1},d(e){e&&(s(k),s($),s(ne),s(xe),s(N),s($e),s(V),s(De),s(z),s(Te),s(ye),s(F),s(Pe),s(W),s(Me),s(we),s(l),s(Ce),s(Le),s(T),s(Oe),s(Ie),s(Se)),s(v),_(q,e),_(A,e),_(U,e),_(j),_(B),_(y),_(G),_(Y),_(J),_(R),_(Z),_(K),_(Q),_(X),_(ee),_(te,e),_
(se),_(re,e)}}} // continuation of L124: last two destroy calls, then closes Wt's d() method, the create_fragment object, and function Wt itself
// Serialized page metadata written into the <meta name="hf:doc:metadata"> tag by Wt's h() hook:
// page title plus the section tree (Tips / DPMSolverSinglestepScheduler / SchedulerOutput) for this docs page.
const Ut='{"title":"DPMSolverSinglestepScheduler","local":"dpmsolversinglestepscheduler","sections":[{"title":"Tips","local":"tips","sections":[],"depth":2},{"title":"DPMSolverSinglestepScheduler","local":"diffusers.DPMSolverSinglestepScheduler","sections":[],"depth":2},{"title":"SchedulerOutput","local":"diffusers.schedulers.scheduling_utils.SchedulerOutput","sections":[],"depth":2}],"depth":1}';
// Component "instance" function: registers a callback via Ht (imported as `o` from the
// scheduler chunk — presumably Svelte's onMount; confirm against the chunk) that reads the
// "fw" (framework) query parameter from the page URL; the read value is discarded here.
// Returns an empty context/props array, i.e. this page component has no reactive state.
function jt(be){return Ht(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}
// Page component wrapper: Nt (presumably Svelte's `init` — TODO confirm) wires the instance
// function (jt), the create_fragment (Wt), and the equality helper (Et, imported as `s`)
// into the compiled-component base class qt. Exported as `component` for the docs router.
class Zt extends qt{constructor(v){super(),Nt(this,v,jt,Wt,Et,{})}}export{Zt as component};

Xet Storage Details

Size:
35.9 kB
·
Xet hash:
35919d51b5f6c0bfd262fedcbef87ef1f47793cc34f157d9a8c81274129847b7

Xet efficiently stores files by intelligently splitting them into deduplicated chunks, accelerating uploads and downloads. More info: see the Hugging Face Xet storage documentation.