# SANA-Sprint

<div class="flex flex-wrap space-x-1">
  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
</div>

[SANA-Sprint: One-Step Diffusion with Continuous-Time Consistency Distillation](https://huggingface.co/papers/2503.09641) from NVIDIA, MIT HAN Lab, and Hugging Face, by Junsong Chen, Shuchen Xue, Yuyang Zhao, Jincheng Yu, Sayak Paul, Junyu Chen, Han Cai, Enze Xie, and Song Han.

The abstract from the paper is:

*This paper presents SANA-Sprint, an efficient diffusion model for ultra-fast text-to-image (T2I) generation. SANA-Sprint is built on a pre-trained foundation model and augmented with hybrid distillation, dramatically reducing inference steps from 20 to 1-4. We introduce three key innovations: (1) We propose a training-free approach that transforms a pre-trained flow-matching model for continuous-time consistency distillation (sCM), eliminating costly training from scratch and achieving high training efficiency. Our hybrid distillation strategy combines sCM with latent adversarial distillation (LADD): sCM ensures alignment with the teacher model, while LADD enhances single-step generation fidelity. (2) SANA-Sprint is a unified step-adaptive model that achieves high-quality generation in 1-4 steps, eliminating step-specific training and improving efficiency. (3) We integrate ControlNet with SANA-Sprint for real-time interactive image generation, enabling instant visual feedback for user interaction. SANA-Sprint establishes a new Pareto frontier in speed-quality tradeoffs, achieving state-of-the-art performance with 7.59 FID and 0.74 GenEval in only 1 step — outperforming FLUX-schnell (7.94 FID / 0.71 GenEval) while being 10× faster (0.1s vs 1.1s on H100). It also achieves 0.1s (T2I) and 0.25s (ControlNet) latency for 1024×1024 images on H100, and 0.31s (T2I) on an RTX 4090, showcasing its exceptional efficiency and potential for AI-powered consumer applications (AIPC). Code and pre-trained models will be open-sourced.*

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>
This pipeline was contributed by [lawrence-cj](https://github.com/lawrence-cj), [shuchen Xue](https://github.com/scxue) and [Enze Xie](https://github.com/xieenze). The original codebase can be found [here](https://github.com/NVlabs/Sana). The original weights can be found under [hf.co/Efficient-Large-Model](https://huggingface.co/Efficient-Large-Model/).

Available models:

| Model | Recommended dtype |
|:-----:|:-----------------:|
| [`Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers`](https://huggingface.co/Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers) | `torch.bfloat16` |
| [`Efficient-Large-Model/Sana_Sprint_0.6B_1024px_diffusers`](https://huggingface.co/Efficient-Large-Model/Sana_Sprint_0.6B_1024px_diffusers) | `torch.bfloat16` |

Refer to [this](https://huggingface.co/collections/Efficient-Large-Model/sana-sprint-67d6810d65235085b3b17c76) collection for more information.

Note: the recommended dtype above is for the transformer weights. The text encoder must stay in `torch.bfloat16`, and the VAE weights must stay in `torch.bfloat16` or `torch.float32` for the model to work correctly. Please refer to the inference examples below to see how to load the model with the recommended dtype.
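As a quick orientation before the full examples below, here is a minimal sketch of loading the pipeline with the recommended dtypes; the VAE upcast is optional and shown only for illustration:

```py
import torch
from diffusers import SanaSprintPipeline

# Transformer and text encoder are loaded in torch.bfloat16, as recommended above.
pipe = SanaSprintPipeline.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    torch_dtype=torch.bfloat16,
)
# The VAE may stay in torch.bfloat16 or be upcast to torch.float32 (optional).
pipe.vae.to(torch.float32)
pipe.to("cuda")
```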
## Quantization

Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have a varying impact on image quality depending on the model.

Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [SanaSprintPipeline](#sanasprintpipeline) for inference with bitsandbytes.
```py
import torch
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, SanaTransformer2DModel, SanaSprintPipeline
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, AutoModel

# Quantize the text encoder to 8-bit through transformers.
quant_config = BitsAndBytesConfig(load_in_8bit=True)
text_encoder_8bit = AutoModel.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    subfolder="text_encoder",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)

# Quantize the transformer to 8-bit through diffusers.
quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
transformer_8bit = SanaTransformer2DModel.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)

# Assemble the pipeline from the quantized components.
pipeline = SanaSprintPipeline.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    text_encoder=text_encoder_8bit,
    transformer=transformer_8bit,
    torch_dtype=torch.bfloat16,
    device_map="balanced",
)

prompt = "a tiny astronaut hatching from an egg on the moon"
image = pipeline(prompt).images[0]
image.save("sana.png")
```

## Setting max_timesteps

Users can tweak the `max_timesteps` value to experiment with the visual quality of the generated outputs. The default `max_timesteps` value was obtained with an inference-time search process. For more details about it, check out the paper.
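For example, a minimal sketch that reuses a loaded pipeline with an illustrative, non-default value:

```py
# `pipe` is a SanaSprintPipeline loaded as in the examples on this page.
# max_timesteps defaults to 1.5708 (roughly pi/2); the value below is purely
# illustrative, not a recommendation.
image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon",
    max_timesteps=1.4,
).images[0]
image.save("output_max_timesteps.png")
```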
## Image to Image

The [SanaSprintImg2ImgPipeline](#sanasprintimg2imgpipeline) is a pipeline for image-to-image generation. It takes an input image and a prompt, and generates a new image conditioned on both.

```py
import torch
from diffusers import SanaSprintImg2ImgPipeline
from diffusers.utils.loading_utils import load_image

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png"
)

pipe = SanaSprintImg2ImgPipeline.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

image = pipe(
    prompt="a cute pink bear",
    image=image,
    strength=0.5,
    height=832,
    width=480,
).images[0]
image.save("output.png")
```

## SanaSprintPipeline

**class diffusers.SanaSprintPipeline**(tokenizer: `GemmaTokenizer` or `GemmaTokenizerFast`, text_encoder: `Gemma2PreTrainedModel`, vae: `AutoencoderDC`, transformer: `SanaTransformer2DModel`, scheduler: `DPMSolverMultistepScheduler`)

Pipeline for text-to-image generation using [SANA-Sprint](https://huggingface.co/papers/2503.09641).

### `__call__`

`SanaSprintPipeline.__call__(prompt=None, num_inference_steps=2, timesteps=None, max_timesteps=1.5708, intermediate_timesteps=1.3, guidance_scale=4.5, num_images_per_prompt=1, height=1024, width=1024, eta=0.0, generator=None, latents=None, prompt_embeds=None, prompt_attention_mask=None, output_type="pil", return_dict=True, clean_caption=False, use_resolution_binning=True, attention_kwargs=None, callback_on_step_end=None, callback_on_step_end_tensor_inputs=["latents"], max_sequence_length=300, complex_human_instruction=<built-in prompt-enhancement instructions>)`

Function invoked when calling the pipeline for generation.

Parameters:
- **prompt** (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- **num_inference_steps** (`int`, *optional*, defaults to 2): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **max_timesteps** (`float`, *optional*, defaults to 1.5708): The maximum timestep value used in the SCM scheduler.
- **intermediate_timesteps** (`float`, *optional*, defaults to 1.3): The intermediate timestep value used in the SCM scheduler (only used when `num_inference_steps=2`).
- **timesteps** (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order.
- **guidance_scale** (`float`, *optional*, defaults to 4.5): Embedded guidance scale, enabled by setting `guidance_scale > 1`. A higher `guidance_scale` encourages the model to generate images more closely aligned with `prompt`, at the expense of lower image quality. Guidance-distilled models approximate true classifier-free guidance for `guidance_scale > 1`. Refer to the [paper](https://huggingface.co/papers/2210.03142) to learn more.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
- **height** (`int`, *optional*, defaults to 1024): The height in pixels of the generated image.
- **width** (`int`, *optional*, defaults to 1024): The width in pixels of the generated image.
- **eta** (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the [DDIM paper](https://huggingface.co/papers/2010.02502). Only applies to `schedulers.DDIMScheduler`; ignored for others.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling with the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **prompt_attention_mask** (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
- **output_type** (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`): Whether or not to return a [SanaPipelineOutput](#sanapipelineoutput) instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clean_caption** (`bool`, *optional*, defaults to `False`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt.
- **use_resolution_binning** (`bool`, defaults to `True`): If set to `True`, the requested height and width are first mapped to the closest resolutions using `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to the requested resolution. Useful for generating non-square images.
- **callback_on_step_end** (`Callable`, *optional*): A function called at the end of each denoising step during inference, with the signature `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include the tensors specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You can only include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
- **max_sequence_length** (`int`, defaults to 300): Maximum sequence length to use with the `prompt`.
- **complex_human_instruction** (`List[str]`, *optional*): Instructions for complex human attention; see the [reference config](https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55).

Returns: [SanaPipelineOutput](#sanapipelineoutput) or `tuple`. If `return_dict` is `True`, a [SanaPipelineOutput](#sanapipelineoutput) is returned; otherwise a `tuple` is returned, where the first element is a list with the generated images.
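Since the callback contract above is easy to get wrong, here is a minimal sketch; the function name is illustrative, and only tensors listed in the pipeline's `._callback_tensor_inputs` may be requested:

```py
def log_latents(pipeline, step, timestep, callback_kwargs):
    # Inspect the intermediate latents at the end of each denoising step.
    latents = callback_kwargs["latents"]
    print(f"step {step} (t={timestep}): latent std = {latents.std().item():.4f}")
    return callback_kwargs  # return the (possibly modified) kwargs dict

image = pipe(
    prompt="a tiny astronaut hatching from an egg on the moon",
    callback_on_step_end=log_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```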
Example:

```py
>>> import torch
>>> from diffusers import SanaSprintPipeline

>>> pipe = SanaSprintPipeline.from_pretrained(
...     "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers", torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")

>>> image = pipe(prompt="a tiny astronaut hatching from an egg on the moon").images[0]
>>> image.save("output.png")
```

### disable_vae_slicing

Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step.

### disable_vae_tiling

Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step.

### enable_vae_slicing

Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.

### enable_vae_tiling

Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for processing larger images.

### encode_prompt

`SanaSprintPipeline.encode_prompt(prompt, num_images_per_prompt=1, device=None, prompt_embeds=None, prompt_attention_mask=None, clean_caption=False, max_sequence_length=300, complex_human_instruction=None, lora_scale=None)`

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): prompt to be encoded
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): number of images that should be generated per prompt
- **device** (`torch.device`, *optional*): torch device to place the resulting embeddings on
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **clean_caption** (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding.
- **max_sequence_length** (`int`, defaults to 300): Maximum sequence length to use for the prompt.
- **complex_human_instruction** (`List[str]`, *optional*): If `complex_human_instruction` is not empty, the function will use the complex human instruction for the prompt.
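A sketch of precomputing embeddings with `encode_prompt` and feeding them back through `prompt_embeds`; this assumes the method returns the embedding/attention-mask pair implied by the parameter list above:

```py
import torch

# `pipe` is the SanaSprintPipeline loaded above. Encode the prompt once and
# reuse the embeddings across several seeds.
prompt_embeds, prompt_attention_mask = pipe.encode_prompt(
    prompt="a tiny astronaut hatching from an egg on the moon",
    device=pipe.device,
)
for seed in (0, 1):
    generator = torch.Generator("cuda").manual_seed(seed)
    image = pipe(
        prompt=None,
        prompt_embeds=prompt_embeds,
        prompt_attention_mask=prompt_attention_mask,
        generator=generator,
    ).images[0]
    image.save(f"astronaut_{seed}.png")
```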
## SanaSprintImg2ImgPipeline

**class diffusers.SanaSprintImg2ImgPipeline**(tokenizer: `GemmaTokenizer` or `GemmaTokenizerFast`, text_encoder: `Gemma2PreTrainedModel`, vae: `AutoencoderDC`, transformer: `SanaTransformer2DModel`, scheduler: `DPMSolverMultistepScheduler`)

Pipeline for image-to-image generation using [SANA-Sprint](https://huggingface.co/papers/2503.09641).

### `__call__`

`SanaSprintImg2ImgPipeline.__call__(prompt=None, num_inference_steps=2, timesteps=None, max_timesteps=1.5708, intermediate_timesteps=1.3, guidance_scale=4.5, image=None, strength=0.6, num_images_per_prompt=1, height=1024, width=1024, eta=0.0, generator=None, latents=None, prompt_embeds=None, prompt_attention_mask=None, output_type="pil", return_dict=True, clean_caption=False, use_resolution_binning=True, attention_kwargs=None, callback_on_step_end=None, callback_on_step_end_tensor_inputs=["latents"], max_sequence_length=300, complex_human_instruction=<built-in prompt-enhancement instructions>)`

Function invoked when calling the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- **num_inference_steps** (`int`, *optional*, defaults to 2): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **max_timesteps** (`float`, *optional*, defaults to 1.5708): The maximum timestep value used in the SCM scheduler.
- **intermediate_timesteps** (`float`, *optional*, defaults to 1.3): The intermediate timestep value used in the SCM scheduler (only used when `num_inference_steps=2`).
- **timesteps** (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order.
- **guidance_scale** (`float`, *optional*, defaults to 4.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
- **image** (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, or a list of these, *optional*): The input image to use as the starting point for the generation.
- **strength** (`float`, *optional*, defaults to 0.6): Indicates how much to transform the reference `image`; must be between 0 and 1. Higher values add more noise and deviate further from the input.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): The number of images to generate per prompt.
- **height** (`int`, *optional*, defaults to 1024): The height in pixels of the generated image.
- **width** (`int`, *optional*, defaults to 1024): The width in pixels of the generated image.
- **eta** (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: [https://arxiv.org/abs/2010.02502](https://arxiv.org/abs/2010.02502). Only applies to `schedulers.DDIMScheduler`; ignored for others.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling with the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **prompt_attention_mask** (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
- **output_type** (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`): Whether or not to return a [SanaPipelineOutput](#sanapipelineoutput) instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*): A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clean_caption** (`bool`, *optional*, defaults to `False`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt.
- **use_resolution_binning** (`bool`, defaults to `True`): If set to `True`, the requested height and width are first mapped to the closest resolutions using `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to the requested resolution. Useful for generating non-square images.
- **callback_on_step_end** (`Callable`, *optional*): A function called at the end of each denoising step during inference, with the signature `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include the tensors specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You can only include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
- **max_sequence_length** (`int`, defaults to 300): Maximum sequence length to use with the `prompt`.
- **complex_human_instruction** (`List[str]`, *optional*): Instructions for complex human attention; see the [reference config](https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55).

Returns: [SanaPipelineOutput](#sanapipelineoutput) or `tuple`. If `return_dict` is `True`, a [SanaPipelineOutput](#sanapipelineoutput) is returned; otherwise a `tuple` is returned, where the first element is a list with the generated images.

Example:

```py
>>> import torch
>>> from diffusers import SanaSprintImg2ImgPipeline
>>> from diffusers.utils.loading_utils import load_image

>>> pipe = SanaSprintImg2ImgPipeline.from_pretrained(
...     "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers", torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")

>>> image = load_image(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/penguin.png"
... )

>>> image = pipe(prompt="a cute pink bear", image=image, strength=0.5, height=832, width=480).images[0]
>>> image.save("output.png")
```

### disable_vae_slicing

Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step.

### disable_vae_tiling

Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step.

### enable_vae_slicing

Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.

### enable_vae_tiling

Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for processing larger images.
### encode_prompt

`SanaSprintImg2ImgPipeline.encode_prompt(prompt, num_images_per_prompt=1, device=None, prompt_embeds=None, prompt_attention_mask=None, clean_caption=False, max_sequence_length=300, complex_human_instruction=None, lora_scale=None)`

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*): prompt to be encoded
- **num_images_per_prompt** (`int`, *optional*, defaults to 1): number of images that should be generated per prompt
- **device** (`torch.device`, *optional*): torch device to place the resulting embeddings on
- **prompt_embeds** (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, e.g. prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **clean_caption** (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding.
- **max_sequence_length** (`int`, defaults to 300): Maximum sequence length to use for the prompt.
- **complex_human_instruction** (`List[str]`, *optional*): If `complex_human_instruction` is not empty, the function will use the complex human instruction for the prompt.
## SanaPipelineOutput

**class diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput**(images: `List[PIL.Image.Image]` or `np.ndarray`)

Output class for Sana pipelines.

Parameters:

- **images** (`List[PIL.Image.Image]` or `np.ndarray`): List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height, width, num_channels)`, containing the denoised images of the diffusion pipeline.
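A brief usage note, as a sketch: with the default `return_dict=True` the call returns this output class, while `return_dict=False` yields a plain tuple whose first element is the images list:

```py
# `pipe` is a SanaSprintPipeline loaded as in the examples above.
out = pipe(prompt="a tiny astronaut hatching from an egg on the moon")
print(type(out).__name__)  # SanaPipelineOutput
out.images[0].save("astronaut.png")

# With return_dict=False, index the tuple instead of accessing .images.
result = pipe(prompt="a tiny astronaut hatching from an egg on the moon", return_dict=False)
result[0][0].save("astronaut_tuple.png")
```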
n=qn("svelte-u9bgzb",document.head);d=l(n,"META",{name:!0,content:!0}),n.forEach(t),U=i(e),y=l(e,"P",{}),x(y).forEach(t),b=i(e),g(T.$$.fragment,e),p=i(e),I=l(e,"DIV",{class:!0,"data-svelte-h":!0}),c(I)!=="svelte-si9ct8"&&(I.innerHTML=dn),ze=i(e),D=l(e,"P",{"data-svelte-h":!0}),c(D)!=="svelte-2zyjd8"&&(D.innerHTML=cn),Qe=i(e),z=l(e,"P",{"data-svelte-h":!0}),c(z)!=="svelte-1cwsb16"&&(z.textContent=mn),Ye=i(e),Q=l(e,"P",{"data-svelte-h":!0}),c(Q)!=="svelte-rsnwrp"&&(Q.innerHTML=gn),Oe=i(e),g(E.$$.fragment,e),Ke=i(e),Y=l(e,"P",{"data-svelte-h":!0}),c(Y)!=="svelte-1rwl4x2"&&(Y.innerHTML=un),et=i(e),O=l(e,"P",{"data-svelte-h":!0}),c(O)!=="svelte-1bob28v"&&(O.textContent=fn),tt=i(e),K=l(e,"TABLE",{"data-svelte-h":!0}),c(K)!=="svelte-1nolrdp"&&(K.innerHTML=hn),nt=i(e),ee=l(e,"P",{"data-svelte-h":!0}),c(ee)!=="svelte-27y0yu"&&(ee.innerHTML=_n),at=i(e),te=l(e,"P",{"data-svelte-h":!0}),c(te)!=="svelte-5fyxa5"&&(te.innerHTML=bn),it=i(e),g(ne.$$.fragment,e),st=i(e),ae=l(e,"P",{"data-svelte-h":!0}),c(ae)!=="svelte-1ou2pxc"&&(ae.textContent=yn),ot=i(e),ie=l(e,"P",{"data-svelte-h":!0}),c(ie)!=="svelte-rphms0"&&(ie.innerHTML=wn),rt=i(e),g(se.$$.fragment,e),lt=i(e),g(oe.$$.fragment,e),pt=i(e),re=l(e,"P",{"data-svelte-h":!0}),c(re)!=="svelte-1nuwixv"&&(re.innerHTML=vn),dt=i(e),g(le.$$.fragment,e),ct=i(e),pe=l(e,"P",{"data-svelte-h":!0}),c(pe)!=="svelte-1m1b35v"&&(pe.innerHTML=Sn),mt=i(e),g(de.$$.fragment,e),gt=i(e),g(ce.$$.fragment,e),ut=i(e),w=l(e,"DIV",{class:!0});var S=x(w);g(me.$$.fragment,S),kt=i(S),Ze=l(S,"P",{"data-svelte-h":!0}),c(Ze)!=="svelte-19l2afh"&&(Ze.innerHTML=Mn),jt=i(S),P=l(S,"DIV",{class:!0});var k=x(P);g(ge.$$.fragment,k),Et=i(k),ke=l(k,"P",{"data-svelte-h":!0}),c(ke)!=="svelte-v78lg8"&&(ke.textContent=Tn),Bt=i(k),g(B.$$.fragment,k),k.forEach(t),Wt=i(S),W=l(S,"DIV",{class:!0});var Pe=x(W);g(ue.$$.fragment,Pe),Lt=i(Pe),je=l(Pe,"P",{"data-svelte-h":!0}),c(je)!=="svelte-1s3c06i"&&(je.innerHTML=xn),Pe.forEach(t),Gt=i(S),L=l(S,"DIV",{class:!0});var St=x(L);g(fe.$$.fragment,St),Nt=i(St),Ee=l(St,"P",{"data-svelte-h":!0}),c(Ee)!=="svelte-pkn4ui"&&(Ee.innerHTML=In),St.forEach(t),At=i(S),G=l(S,"DIV",{class:!0});var Mt=x(G);g(he.$$.fragment,Mt),Vt=i(Mt),Be=l(Mt,"P",{"data-svelte-h":!0}),c(Be)!=="svelte-14bnrb6"&&(Be.textContent=$n),Mt.forEach(t),qt=i(S),N=l(S,"DIV",{class:!0});var Tt=x(N);g(_e.$$.fragment,Tt),Ht=i(Tt),We=l(Tt,"P",{"data-svelte-h":!0}),c(We)!=="svelte-1xwrf7t"&&(We.textContent=Jn),Tt.forEach(t),Rt=i(S),A=l(S,"DIV",{class:!0});var xt=x(A);g(be.$$.fragment,xt),Xt=i(xt),Le=l(xt,"P",{"data-svelte-h":!0}),c(Le)!=="svelte-16q0ax1"&&(Le.textContent=Un),xt.forEach(t),S.forEach(t),ft=i(e),g(ye.$$.fragment,e),ht=i(e),v=l(e,"DIV",{class:!0});var $=x(v);g(we.$$.fragment,$),Ft=i($),Ge=l($,"P",{"data-svelte-h":!0}),c(Ge)!=="svelte-19l2afh"&&(Ge.innerHTML=Pn),Dt=i($),C=l($,"DIV",{class:!0});var Fe=x(C);g(ve.$$.fragment,Fe),zt=i(Fe),Ne=l(Fe,"P",{"data-svelte-h":!0}),c(Ne)!=="svelte-v78lg8"&&(Ne.textContent=Cn),Qt=i(Fe),g(V.$$.fragment,Fe),Fe.forEach(t),Yt=i($),q=l($,"DIV",{class:!0});var It=x(q);g(Se.$$.fragment,It),Ot=i(It),Ae=l(It,"P",{"data-svelte-h":!0}),c(Ae)!=="svelte-1s3c06i"&&(Ae.innerHTML=Zn),It.forEach(t),Kt=i($),H=l($,"DIV",{class:!0});var $t=x(H);g(Me.$$.fragment,$t),en=i($t),Ve=l($t,"P",{"data-svelte-h":!0}),c(Ve)!=="svelte-pkn4ui"&&(Ve.innerHTML=kn),$t.forEach(t),tn=i($),R=l($,"DIV",{class:!0});var Jt=x(R);g(Te.$$.fragment,Jt),nn=i(Jt),qe=l(Jt,"P",{"data-svelte-h":!0}),c(qe)!=="svelte-14bnrb6"&&(qe.textContent=jn),Jt.forEach(t),an=i($),X=l($,"DIV",{class:!0});var 
Ut=x(X);g(xe.$$.fragment,Ut),sn=i(Ut),He=l(Ut,"P",{"data-svelte-h":!0}),c(He)!=="svelte-1xwrf7t"&&(He.textContent=En),Ut.forEach(t),on=i($),F=l($,"DIV",{class:!0});var Pt=x(F);g(Ie.$$.fragment,Pt),rn=i(Pt),Re=l(Pt,"P",{"data-svelte-h":!0}),c(Re)!=="svelte-16q0ax1"&&(Re.textContent=Bn),Pt.forEach(t),$.forEach(t),_t=i(e),g($e.$$.fragment,e),bt=i(e),Z=l(e,"DIV",{class:!0});var Ct=x(Z);g(Je.$$.fragment,Ct),ln=i(Ct),Xe=l(Ct,"P",{"data-svelte-h":!0}),c(Xe)!=="svelte-1h3n85u"&&(Xe.textContent=Wn),Ct.forEach(t),yt=i(e),g(Ue.$$.fragment,e),wt=i(e),De=l(e,"P",{}),x(De).forEach(t),this.h()},h(){M(d,"name","hf:doc:metadata"),M(d,"content",Qn),M(I,"class","flex flex-wrap space-x-1"),M(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(w,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),M(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,n){s(document.head,d),o(e,U,n),o(e,y,n),o(e,b,n),u(T,e,n),o(e,p,n),o(e,I,n),o(e,ze,n),o(e,D,n),o(e,Qe,n),o(e,z,n),o(e,Ye,n),o(e,Q,n),o(e,Oe,n),u(E,e,n),o(e,Ke,n),o(e,Y,n),o(e,et,n),o(e,O,n),o(e,tt,n),o(e,K,n),o(e,nt,n),o(e,ee,n),o(e,at,n),o(e,te,n),o(e,it,n),u(ne,e,n),o(e,st,n),o(e,ae,n),o(e,ot,n),o(e,ie,n),o(e,rt,n),u(se,e,n),o(e,lt,n),u(oe,e,n),o(e,pt,n),o(e,re,n),o(e,dt,n),u(le,e,n),o(e,ct,n),o(e,pe,n),o(e,mt,n),u(de,e,n),o(e,gt,n),u(ce,e,n),o(e,ut,n),o(e,w,n),u(me,w,null),s(w,kt),s(w,Ze),s(w,jt),s(w,P),u(ge,P,null),s(P,Et),s(P,ke),s(P,Bt),u(B,P,null),s(w,Wt),s(w,W),u(ue,W,null),s(W,Lt),s(W,je),s(w,Gt),s(w,L),u(fe,L,null),s(L,Nt),s(L,Ee),s(w,At),s(w,G),u(he,G,null),s(G,Vt),s(G,Be),s(w,qt),s(w,N),u(_e,N,null),s(N,Ht),s(N,We),s(w,Rt),s(w,A),u(be,A,null),s(A,Xt),s(A,Le),o(e,ft,n),u(ye,e,n),o(e,ht,n),o(e,v,n),u(we,v,null),s(v,Ft),s(v,Ge),s(v,Dt),s(v,C),u(ve,C,null),s(C,zt),s(C,Ne),s(C,Qt),u(V,C,null),s(v,Yt),s(v,q),u(Se,q,null),s(q,Ot),s(q,Ae),s(v,Kt),s(v,H),u(Me,H,null),s(H,en),s(H,Ve),s(v,tn),s(v,R),u(Te,R,null),s(R,nn),s(R,qe),s(v,an),s(v,X),u(xe,X,null),s(X,sn),s(X,He),s(v,on),s(v,F),u(Ie,F,null),s(F,rn),s(F,Re),o(e,_t,n),u($e,e,n),o(e,bt,n),o(e,Z,n),u(Je,Z,null),s(Z,ln),s(Z,Xe),o(e,yt,n),u(Ue,e,n),o(e,wt,n),o(e,De,n),vt=!0},p(e,[n]){const S={};n&2&&(S.$$scope={dirty:n,ctx:e}),E.$set(S);const k={};n&2&&(k.$$scope={dirty:n,ctx:e}),B.$set(k);const 
Pe={};n&2&&(Pe.$$scope={dirty:n,ctx:e}),V.$set(Pe)},i(e){vt||(f(T.$$.fragment,e),f(E.$$.fragment,e),f(ne.$$.fragment,e),f(se.$$.fragment,e),f(oe.$$.fragment,e),f(le.$$.fragment,e),f(de.$$.fragment,e),f(ce.$$.fragment,e),f(me.$$.fragment,e),f(ge.$$.fragment,e),f(B.$$.fragment,e),f(ue.$$.fragment,e),f(fe.$$.fragment,e),f(he.$$.fragment,e),f(_e.$$.fragment,e),f(be.$$.fragment,e),f(ye.$$.fragment,e),f(we.$$.fragment,e),f(ve.$$.fragment,e),f(V.$$.fragment,e),f(Se.$$.fragment,e),f(Me.$$.fragment,e),f(Te.$$.fragment,e),f(xe.$$.fragment,e),f(Ie.$$.fragment,e),f($e.$$.fragment,e),f(Je.$$.fragment,e),f(Ue.$$.fragment,e),vt=!0)},o(e){h(T.$$.fragment,e),h(E.$$.fragment,e),h(ne.$$.fragment,e),h(se.$$.fragment,e),h(oe.$$.fragment,e),h(le.$$.fragment,e),h(de.$$.fragment,e),h(ce.$$.fragment,e),h(me.$$.fragment,e),h(ge.$$.fragment,e),h(B.$$.fragment,e),h(ue.$$.fragment,e),h(fe.$$.fragment,e),h(he.$$.fragment,e),h(_e.$$.fragment,e),h(be.$$.fragment,e),h(ye.$$.fragment,e),h(we.$$.fragment,e),h(ve.$$.fragment,e),h(V.$$.fragment,e),h(Se.$$.fragment,e),h(Me.$$.fragment,e),h(Te.$$.fragment,e),h(xe.$$.fragment,e),h(Ie.$$.fragment,e),h($e.$$.fragment,e),h(Je.$$.fragment,e),h(Ue.$$.fragment,e),vt=!1},d(e){e&&(t(U),t(y),t(b),t(p),t(I),t(ze),t(D),t(Qe),t(z),t(Ye),t(Q),t(Oe),t(Ke),t(Y),t(et),t(O),t(tt),t(K),t(nt),t(ee),t(at),t(te),t(it),t(st),t(ae),t(ot),t(ie),t(rt),t(lt),t(pt),t(re),t(dt),t(ct),t(pe),t(mt),t(gt),t(ut),t(w),t(ft),t(ht),t(v),t(_t),t(bt),t(Z),t(yt),t(wt),t(De)),t(d),_(T,e),_(E,e),_(ne,e),_(se,e),_(oe,e),_(le,e),_(de,e),_(ce,e),_(me),_(ge),_(B),_(ue),_(fe),_(he),_(_e),_(be),_(ye,e),_(we),_(ve),_(V),_(Se),_(Me),_(Te),_(xe),_(Ie),_($e,e),_(Je),_(Ue,e)}}}const Qn='{"title":"SANA-Sprint","local":"sana-sprint","sections":[{"title":"Quantization","local":"quantization","sections":[],"depth":2},{"title":"Setting max_timesteps","local":"setting-maxtimesteps","sections":[],"depth":2},{"title":"Image to Image","local":"image-to-image","sections":[],"depth":2},{"title":"SanaSprintPipeline","local":"diffusers.SanaSprintPipeline","sections":[],"depth":2},{"title":"SanaSprintImg2ImgPipeline","local":"diffusers.SanaSprintImg2ImgPipeline","sections":[],"depth":2},{"title":"SanaPipelineOutput","local":"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput","sections":[],"depth":2}],"depth":1}';function Yn(j){return Nn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class sa extends An{constructor(d){super(),Vn(this,d,Yn,zn,Gn,{})}}export{sa as component};
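The parameter tables embedded above document `SanaSprintImg2ImgPipeline.encode_prompt` (`clean_caption`, `max_sequence_length`, `complex_human_instruction`) and the `SanaPipelineOutput` container, but carry no usage snippet. Below is a minimal sketch tying the two together. Treat it as illustrative rather than canonical: the `(prompt_embeds, prompt_attention_mask)` unpacking and the contents of the instruction list are assumptions to verify against the installed diffusers version, and the prompt and init image are placeholders.

```python
import torch
from PIL import Image
from diffusers import SanaSprintImg2ImgPipeline

pipe = SanaSprintImg2ImgPipeline.from_pretrained(
    "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

# Hypothetical instruction list: when non-empty it is applied to the prompt;
# the pipeline ships its own default (see complex_human_instruction above).
chi = ["Given a user prompt, generate a detailed caption for image generation:"]

# clean_caption preprocesses and cleans the caption before encoding;
# max_sequence_length caps the tokenized prompt (documented default: 300).
# ASSUMPTION: the two-value return below may differ across diffusers versions.
prompt_embeds, prompt_attention_mask = pipe.encode_prompt(
    "a cyberpunk cat",
    clean_caption=True,
    max_sequence_length=300,
    complex_human_instruction=chi,
)
print(prompt_embeds.shape, prompt_attention_mask.shape)

# Any PIL image works as the init image for this sketch; a blank canvas here.
init_image = Image.new("RGB", (1024, 1024), "white")

# The call returns a SanaPipelineOutput: `images` is a list of PIL images of
# length batch_size, or a numpy array of shape
# (batch_size, height, width, num_channels) when output_type="np".
output = pipe(prompt="a cyberpunk cat", image=init_image, strength=0.5)
output.images[0].save("output.png")
```

Precomputing embeddings this way mainly pays off when many images are generated from the same prompt; for a single call, the pipeline runs `encode_prompt` internally and the first half of the snippet can be dropped.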