Buckets:
| import{s as _t,o as wt,n as _e}from"../chunks/scheduler.8c3d61f6.js";import{S as yt,i as vt,g as c,s as i,r as _,A as $t,h as d,f as o,c as l,j,u as w,x as T,k as L,y as s,a as u,v as y,d as v,t as $,w as x}from"../chunks/index.da70eac4.js";import{T as tt}from"../chunks/Tip.1d9b8c37.js";import{D as Z}from"../chunks/Docstring.ee4b6913.js";import{C as at}from"../chunks/CodeBlock.00a903b3.js";import{E as nt}from"../chunks/ExampleCodeBlock.f7bd2c1f.js";import{H as st,E as xt}from"../chunks/EditOnGithub.1e64e623.js";function Tt(U){let n,b='Make sure to check out the Stable Diffusion <a href="overview#tips">Tips</a> section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!',r,a,g='If you’re interested in using one of the official checkpoints for a task, explore the <a href="https://huggingface.co/CompVis" rel="nofollow">CompVis</a>, <a href="https://huggingface.co/runwayml" rel="nofollow">Runway</a>, and <a href="https://huggingface.co/stabilityai" rel="nofollow">Stability AI</a> Hub organizations!';return{c(){n=c("p"),n.innerHTML=b,r=i(),a=c("p"),a.innerHTML=g},l(t){n=d(t,"P",{"data-svelte-h":!0}),T(n)!=="svelte-1j961ct"&&(n.innerHTML=b),r=l(t),a=d(t,"P",{"data-svelte-h":!0}),T(a)!=="svelte-z4pn9c"&&(a.innerHTML=g)},m(t,h){u(t,n,h),u(t,r,h),u(t,a,h)},p:_e,d(t){t&&(o(n),o(r),o(a))}}}function Ut(U){let n,b="Examples:",r,a,g;return a=new at({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFN0YWJsZURpZmZ1c2lvbkxhdGVudFVwc2NhbGVQaXBlbGluZSUyQyUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lJTBBaW1wb3J0JTIwdG9yY2glMEElMEElMEFwaXBlbGluZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJDb21wVmlzJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS00JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTBBKSUwQXBpcGVsaW5lLnRvKCUyMmN1ZGElMjIpJTBBJTBBbW9kZWxfaWQlMjAlM0QlMjAlMjJzdGFiaWxpdHlhaSUyRnNkLXgyLWxhdGVudC11cHNjYWxlciUyMiUwQXVwc2NhbGVyJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uTGF0ZW50VXBzY2FsZVBpcGVsaW5lLmZyb21fcHJldHJhaW5lZChtb2RlbF9pZCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEF1cHNjYWxlci50byglMjJjdWRhJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmElMjBwaG90byUyMG9mJTIwYW4lMjBhc3Ryb25hdXQlMjBoaWdoJTIwcmVzb2x1dGlvbiUyQyUyMHVucmVhbCUyMGVuZ2luZSUyQyUyMHVsdHJhJTIwcmVhbGlzdGljJTIyJTBBZ2VuZXJhdG9yJTIwJTNEJTIwdG9yY2gubWFudWFsX3NlZWQoMzMpJTBBJTBBbG93X3Jlc19sYXRlbnRzJTIwJTNEJTIwcGlwZWxpbmUocHJvbXB0JTJDJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJsYXRlbnQlMjIpLmltYWdlcyUwQSUwQXdpdGglMjB0b3JjaC5ub19ncmFkKCklM0ElMEElMjAlMjAlMjAlMjBpbWFnZSUyMCUzRCUyMHBpcGVsaW5lLmRlY29kZV9sYXRlbnRzKGxvd19yZXNfbGF0ZW50cyklMEFpbWFnZSUyMCUzRCUyMHBpcGVsaW5lLm51bXB5X3RvX3BpbChpbWFnZSklNUIwJTVEJTBBJTBBaW1hZ2Uuc2F2ZSglMjIuLiUyRmltYWdlcyUyRmExLnBuZyUyMiklMEElMEF1cHNjYWxlZF9pbWFnZSUyMCUzRCUyMHVwc2NhbGVyKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEbG93X3Jlc19sYXRlbnRzJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDIwJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QwJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBJTBBdXBzY2FsZWRfaW1hZ2Uuc2F2ZSglMjIuLiUyRmltYWdlcyUyRmEyLnBuZyUyMik=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span>pipeline = StableDiffusionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"CompVis/stable-diffusion-v1-4"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipeline.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>model_id = <span class="hljs-string">"stabilityai/sd-x2-latent-upscaler"</span> | |
| <span class="hljs-meta">>>> </span>upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>upscaler.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"a photo of an astronaut high resolution, unreal engine, ultra realistic"</span> | |
| <span class="hljs-meta">>>> </span>generator = torch.manual_seed(<span class="hljs-number">33</span>) | |
| <span class="hljs-meta">>>> </span>low_res_latents = pipeline(prompt, generator=generator, output_type=<span class="hljs-string">"latent"</span>).images | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">with</span> torch.no_grad(): | |
| <span class="hljs-meta">... </span> image = pipeline.decode_latents(low_res_latents) | |
| <span class="hljs-meta">>>> </span>image = pipeline.numpy_to_pil(image)[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"../images/a1.png"</span>) | |
| <span class="hljs-meta">>>> </span>upscaled_image = upscaler( | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> image=low_res_latents, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">20</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">0</span>, | |
| <span class="hljs-meta">... </span> generator=generator, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>upscaled_image.save(<span class="hljs-string">"../images/a2.png"</span>)`,wrap:!1}}),{c(){n=c("p"),n.textContent=b,r=i(),_(a.$$.fragment)},l(t){n=d(t,"P",{"data-svelte-h":!0}),T(n)!=="svelte-kvfsh7"&&(n.textContent=b),r=l(t),w(a.$$.fragment,t)},m(t,h){u(t,n,h),u(t,r,h),y(a,t,h),g=!0},p:_e,i(t){g||(v(a.$$.fragment,t),g=!0)},o(t){$(a.$$.fragment,t),g=!1},d(t){t&&(o(n),o(r)),x(a,t)}}}function Jt(U){let n,b=`⚠️ Don’t enable attention slicing if you’re already using <code>scaled_dot_product_attention</code> (SDPA) from PyTorch | |
| 2.0 or xFormers. These attention computations are already very memory efficient so you won’t need to enable | |
| this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs!`;return{c(){n=c("p"),n.innerHTML=b},l(r){n=d(r,"P",{"data-svelte-h":!0}),T(n)!=="svelte-ackzsn"&&(n.innerHTML=b)},m(r,a){u(r,n,a)},p:_e,d(r){r&&o(n)}}}function Mt(U){let n,b="Examples:",r,a,g;return a=new at({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwU3RhYmxlRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBJTIwJTIwJTIwJTIwdXNlX3NhZmV0ZW5zb3JzJTNEVHJ1ZSUyQyUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMGFuJTIwYXN0cm9uYXV0JTIwcmlkaW5nJTIwYSUyMGhvcnNlJTIwb24lMjBtYXJzJTIyJTBBcGlwZS5lbmFibGVfYXR0ZW50aW9uX3NsaWNpbmcoKSUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPipeline | |
| <span class="hljs-meta">>>> </span>pipe = StableDiffusionPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype=torch.float16, | |
| <span class="hljs-meta">... </span> use_safetensors=<span class="hljs-literal">True</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"a photo of an astronaut riding a horse on mars"</span> | |
| <span class="hljs-meta">>>> </span>pipe.enable_attention_slicing() | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){n=c("p"),n.textContent=b,r=i(),_(a.$$.fragment)},l(t){n=d(t,"P",{"data-svelte-h":!0}),T(n)!=="svelte-kvfsh7"&&(n.textContent=b),r=l(t),w(a.$$.fragment,t)},m(t,h){u(t,n,h),u(t,r,h),y(a,t,h),g=!0},p:_e,i(t){g||(v(a.$$.fragment,t),g=!0)},o(t){$(a.$$.fragment,t),g=!1},d(t){t&&(o(n),o(r)),x(a,t)}}}function It(U){let n,b=`⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes | |
| precedent.`;return{c(){n=c("p"),n.textContent=b},l(r){n=d(r,"P",{"data-svelte-h":!0}),T(n)!=="svelte-17p1lpg"&&(n.textContent=b)},m(r,a){u(r,n,a)},p:_e,d(r){r&&o(n)}}}function Dt(U){let n,b="Examples:",r,a,g;return a=new at({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRGlmZnVzaW9uUGlwZWxpbmUlMEFmcm9tJTIweGZvcm1lcnMub3BzJTIwaW1wb3J0JTIwTWVtb3J5RWZmaWNpZW50QXR0ZW50aW9uRmxhc2hBdHRlbnRpb25PcCUwQSUwQXBpcGUlMjAlM0QlMjBEaWZmdXNpb25QaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLTItMSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEFwaXBlJTIwJTNEJTIwcGlwZS50byglMjJjdWRhJTIyKSUwQXBpcGUuZW5hYmxlX3hmb3JtZXJzX21lbW9yeV9lZmZpY2llbnRfYXR0ZW50aW9uKGF0dGVudGlvbl9vcCUzRE1lbW9yeUVmZmljaWVudEF0dGVudGlvbkZsYXNoQXR0ZW50aW9uT3ApJTBBJTIzJTIwV29ya2Fyb3VuZCUyMGZvciUyMG5vdCUyMGFjY2VwdGluZyUyMGF0dGVudGlvbiUyMHNoYXBlJTIwdXNpbmclMjBWQUUlMjBmb3IlMjBGbGFzaCUyMEF0dGVudGlvbiUwQXBpcGUudmFlLmVuYWJsZV94Zm9ybWVyc19tZW1vcnlfZWZmaWNpZW50X2F0dGVudGlvbihhdHRlbnRpb25fb3AlM0ROb25lKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> xformers.ops <span class="hljs-keyword">import</span> MemoryEfficientAttentionFlashAttentionOp | |
| <span class="hljs-meta">>>> </span>pipe = DiffusionPipeline.from_pretrained(<span class="hljs-string">"stabilityai/stable-diffusion-2-1"</span>, torch_dtype=torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Workaround for not accepting attention shape using VAE for Flash Attention</span> | |
| <span class="hljs-meta">>>> </span>pipe.vae.enable_xformers_memory_efficient_attention(attention_op=<span class="hljs-literal">None</span>)`,wrap:!1}}),{c(){n=c("p"),n.textContent=b,r=i(),_(a.$$.fragment)},l(t){n=d(t,"P",{"data-svelte-h":!0}),T(n)!=="svelte-kvfsh7"&&(n.textContent=b),r=l(t),w(a.$$.fragment,t)},m(t,h){u(t,n,h),u(t,r,h),y(a,t,h),g=!0},p:_e,i(t){g||(v(a.$$.fragment,t),g=!0)},o(t){$(a.$$.fragment,t),g=!1},d(t){t&&(o(n),o(r)),x(a,t)}}}function jt(U){let n,b,r,a,g,t,h,ot='The Stable Diffusion latent upscaler model was created by <a href="https://github.com/crowsonkb/k-diffusion" rel="nofollow">Katherine Crowson</a> in collaboration with <a href="https://stability.ai/" rel="nofollow">Stability AI</a>. It is used to enhance the output image resolution by a factor of 2 (see this demo <a href="https://colab.research.google.com/drive/1o1qYJcFeywzCIdkfKJy7cTpgZTCM2EI4" rel="nofollow">notebook</a> for a demonstration of the original implementation).',ye,G,ve,z,$e,f,Y,Le,ie,it="Pipeline for upscaling Stable Diffusion output image resolution by a factor of 2.",Pe,le,lt=`This model inherits from <a href="/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods | |
| implemented for all pipelines (downloading, saving, running on a particular device, etc.).`,Ce,re,rt="The pipeline also inherits the following loading methods:",Se,pe,pt='<li><a href="/docs/diffusers/main/en/api/loaders/single_file#diffusers.loaders.FromSingleFileMixin.from_single_file">from_single_file()</a> for loading <code>.ckpt</code> files</li>',Ze,P,H,Ge,ce,ct="The call function to the pipeline for generation.",We,W,ke,k,q,Ve,de,dt=`Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state | |
| dicts of all <code>torch.nn.Module</code> components (except those in <code>self._exclude_from_cpu_offload</code>) are saved to CPU | |
| and then moved to <code>torch.device('meta')</code> and loaded to GPU only when their specific submodule has its <code>forward</code> | |
| method called. Offloading happens on a submodule basis. Memory savings are higher than with | |
| <code>enable_model_cpu_offload</code>, but performance is lower.`,Be,J,Q,Ee,fe,ft=`Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor | |
| in slices to compute attention in several steps. For more than one attention head, the computation is performed | |
| sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.`,Ne,V,Re,B,Xe,E,A,Fe,me,mt=`Disable sliced attention computation. If <code>enable_attention_slicing</code> was previously called, attention is | |
| computed in one step.`,ze,M,O,Ye,ue,ut=`Enable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>. When this | |
| option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed | |
| up during training is not guaranteed.`,He,N,qe,R,Qe,X,K,Ae,ge,gt='Disable memory efficient attention from <a href="https://facebookresearch.github.io/xformers/" rel="nofollow">xFormers</a>.',Oe,F,ee,Ke,he,ht="Encodes the prompt into text encoder hidden states.",xe,te,Te,C,ne,et,be,bt="Output class for Stable Diffusion pipelines.",Ue,se,Je,we,Me;return g=new st({props:{title:"Latent upscaler",local:"latent-upscaler",headingTag:"h1"}}),G=new tt({props:{$$slots:{default:[Tt]},$$scope:{ctx:U}}}),z=new st({props:{title:"StableDiffusionLatentUpscalePipeline",local:"diffusers.StableDiffusionLatentUpscalePipeline",headingTag:"h2"}}),Y=new Z({props:{name:"class diffusers.StableDiffusionLatentUpscalePipeline",anchor:"diffusers.StableDiffusionLatentUpscalePipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": EulerDiscreteScheduler"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/main/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIPTextModel</a>) — | |
| Frozen text-encoder (<a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a>).`,name:"text_encoder"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| A <code>CLIPTokenizer</code> to tokenize text.`,name:"tokenizer"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.unet",description:`<strong>unet</strong> (<a href="/docs/diffusers/main/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) — | |
| A <code>UNet2DConditionModel</code> to denoise the encoded image latents.`,name:"unet"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/main/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) — | |
| A <a href="/docs/diffusers/main/en/api/schedulers/euler#diffusers.EulerDiscreteScheduler">EulerDiscreteScheduler</a> to be used in combination with <code>unet</code> to denoise the encoded image latents.`,name:"scheduler"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py#L77"}}),H=new Z({props:{name:"__call__",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__",parameters:[{name:"prompt",val:": Union = None"},{name:"image",val:": Union = None"},{name:"num_inference_steps",val:": int = 75"},{name:"guidance_scale",val:": float = 9.0"},{name:"negative_prompt",val:": Union = None"},{name:"generator",val:": Union = None"},{name:"latents",val:": Optional = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"},{name:"output_type",val:": Optional = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": Optional = None"},{name:"callback_steps",val:": int = 1"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>) — | |
| The prompt or prompts to guide image upscaling.`,name:"prompt"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code> or tensor representing an image batch to be upscaled. If it’s a tensor, it can be either a | |
| latent output from a Stable Diffusion model or an image tensor in the range <code>[-1, 1]</code>. It is considered | |
| a <code>latent</code> if <code>image.shape[1]</code> is <code>4</code>; otherwise, it is considered to be an image representation and | |
| encoded using this pipeline’s <code>vae</code> encoder.`,name:"image"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) — | |
| A higher guidance scale value encourages the model to generate images closely linked to the text | |
| <code>prompt</code> at the expense of lower image quality. Guidance scale is enabled when <code>guidance_scale > 1</code>.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide what to not include in image generation. If not defined, you need to | |
| pass <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (<code>guidance_scale < 1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) from the <a href="https://arxiv.org/abs/2010.02502" rel="nofollow">DDIM</a> paper. Only applies | |
| to the <a href="/docs/diffusers/main/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>, and is ignored in other schedulers.`,name:"eta"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make | |
| generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor is generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between <code>PIL.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <a href="/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput">StableDiffusionPipelineOutput</a> instead of a | |
| plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls every <code>callback_steps</code> steps during inference. The function is called with the | |
| following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function is called. If not specified, the callback is called at | |
| every step.`,name:"callback_steps"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py#L389",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput" | |
| >StableDiffusionPipelineOutput</a> is returned, | |
| otherwise a <code>tuple</code> is returned where the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/main/en/api/pipelines/stable_diffusion/depth2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput" | |
| >StableDiffusionPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),W=new nt({props:{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.__call__.example",$$slots:{default:[Ut]},$$scope:{ctx:U}}}),q=new Z({props:{name:"enable_sequential_cpu_offload",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_sequential_cpu_offload",parameters:[{name:"gpu_id",val:": Optional = None"},{name:"device",val:": Union = 'cuda'"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_sequential_cpu_offload.gpu_id",description:`<strong>gpu_id</strong> (<code>int</code>, <em>optional</em>) — | |
| The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.`,name:"gpu_id"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_sequential_cpu_offload.device",description:`<strong>device</strong> (<code>torch.Device</code> or <code>str</code>, <em>optional</em>, defaults to “cuda”) — | |
| The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will | |
| default to “cuda”.`,name:"device"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_utils.py#L1072"}}),Q=new Z({props:{name:"enable_attention_slicing",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_attention_slicing",parameters:[{name:"slice_size",val:": Union = 'auto'"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_attention_slicing.slice_size",description:`<strong>slice_size</strong> (<code>str</code> or <code>int</code>, <em>optional</em>, defaults to <code>"auto"</code>) — | |
| When <code>"auto"</code>, halves the input to the attention heads, so attention will be computed in two steps. If | |
| <code>"max"</code>, maximum amount of memory will be saved by running only one slice at a time. If a number is | |
| provided, uses as many slices as <code>attention_head_dim // slice_size</code>. In this case, <code>attention_head_dim</code> | |
| must be a multiple of <code>slice_size</code>.`,name:"slice_size"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_utils.py#L1662"}}),V=new tt({props:{warning:!0,$$slots:{default:[Jt]},$$scope:{ctx:U}}}),B=new nt({props:{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_attention_slicing.example",$$slots:{default:[Mt]},$$scope:{ctx:U}}}),A=new Z({props:{name:"disable_attention_slicing",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.disable_attention_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_utils.py#L1702"}}),O=new Z({props:{name:"enable_xformers_memory_efficient_attention",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_xformers_memory_efficient_attention",parameters:[{name:"attention_op",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_xformers_memory_efficient_attention.attention_op",description:`<strong>attention_op</strong> (<code>Callable</code>, <em>optional</em>) — | |
| Override the default <code>None</code> operator for use as <code>op</code> argument to the | |
| <a href="https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention" rel="nofollow"><code>memory_efficient_attention()</code></a> | |
| function of xFormers.`,name:"attention_op"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_utils.py#L1601"}}),N=new tt({props:{warning:!0,$$slots:{default:[It]},$$scope:{ctx:U}}}),R=new nt({props:{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.enable_xformers_memory_efficient_attention.example",$$slots:{default:[Dt]},$$scope:{ctx:U}}}),K=new Z({props:{name:"disable_xformers_memory_efficient_attention",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.disable_xformers_memory_efficient_attention",parameters:[],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_utils.py#L1636"}}),ee=new Z({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt",parameters:[{name:"prompt",val:""},{name:"device",val:""},{name:"do_classifier_free_guidance",val:""},{name:"negative_prompt",val:" = None"},{name:"prompt_embeds",val:": Optional = None"},{name:"negative_prompt_embeds",val:": Optional = None"},{name:"pooled_prompt_embeds",val:": Optional = None"},{name:"negative_pooled_prompt_embeds",val:": Optional = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>list(int)</code>) — | |
| prompt to be encoded | |
| device — (<code>torch.device</code>): | |
| torch device`,name:"prompt"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>) — | |
| The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | |
| if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.StableDiffusionLatentUpscalePipeline.encode_prompt.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code> | |
| input argument.`,name:"negative_pooled_prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py#L159"}}),te=new st({props:{title:"StableDiffusionPipelineOutput",local:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",headingTag:"h2"}}),ne=new Z({props:{name:"class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",parameters:[{name:"images",val:": Union"},{name:"nsfw_content_detected",val:": Optional"}],parametersDescription:[{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or NumPy array of shape <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) — | |
| List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or | |
| <code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L10"}}),se=new xt({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/stable_diffusion/latent_upscale.md"}}),{c(){n=c("meta"),b=i(),r=c("p"),a=i(),_(g.$$.fragment),t=i(),h=c("p"),h.innerHTML=ot,ye=i(),_(G.$$.fragment),ve=i(),_(z.$$.fragment),$e=i(),f=c("div"),_(Y.$$.fragment),Le=i(),ie=c("p"),ie.textContent=it,Pe=i(),le=c("p"),le.innerHTML=lt,Ce=i(),re=c("p"),re.textContent=rt,Se=i(),pe=c("ul"),pe.innerHTML=pt,Ze=i(),P=c("div"),_(H.$$.fragment),Ge=i(),ce=c("p"),ce.textContent=ct,We=i(),_(W.$$.fragment),ke=i(),k=c("div"),_(q.$$.fragment),Ve=i(),de=c("p"),de.innerHTML=dt,Be=i(),J=c("div"),_(Q.$$.fragment),Ee=i(),fe=c("p"),fe.textContent=ft,Ne=i(),_(V.$$.fragment),Re=i(),_(B.$$.fragment),Xe=i(),E=c("div"),_(A.$$.fragment),Fe=i(),me=c("p"),me.innerHTML=mt,ze=i(),M=c("div"),_(O.$$.fragment),Ye=i(),ue=c("p"),ue.innerHTML=ut,He=i(),_(N.$$.fragment),qe=i(),_(R.$$.fragment),Qe=i(),X=c("div"),_(K.$$.fragment),Ae=i(),ge=c("p"),ge.innerHTML=gt,Oe=i(),F=c("div"),_(ee.$$.fragment),Ke=i(),he=c("p"),he.textContent=ht,xe=i(),_(te.$$.fragment),Te=i(),C=c("div"),_(ne.$$.fragment),et=i(),be=c("p"),be.textContent=bt,Ue=i(),_(se.$$.fragment),Je=i(),we=c("p"),this.h()},l(e){const p=$t("svelte-u9bgzb",document.head);n=d(p,"META",{name:!0,content:!0}),p.forEach(o),b=l(e),r=d(e,"P",{}),j(r).forEach(o),a=l(e),w(g.$$.fragment,e),t=l(e),h=d(e,"P",{"data-svelte-h":!0}),T(h)!=="svelte-1gpdp46"&&(h.innerHTML=ot),ye=l(e),w(G.$$.fragment,e),ve=l(e),w(z.$$.fragment,e),$e=l(e),f=d(e,"DIV",{class:!0});var m=j(f);w(Y.$$.fragment,m),Le=l(m),ie=d(m,"P",{"data-svelte-h":!0}),T(ie)!=="svelte-s74qtl"&&(ie.textContent=it),Pe=l(m),le=d(m,"P",{"data-svelte-h":!0}),T(le)!=="svelte-496sm0"&&(le.innerHTML=lt),Ce=l(m),re=d(m,"P",{"data-svelte-h":!0}),T(re)!=="svelte-14s6m4u"&&(re.textContent=rt),Se=l(m),pe=d(m,"UL",{"data-svelte-h":!0}),T(pe)!=="svelte-x9awsn"&&(pe.innerHTML=pt),Ze=l(m),P=d(m,"DIV",{class:!0});var S=j(P);w(H.$$.fragment,S),Ge=l(S),ce=d(S,"P",{"data-svelte-h":!0}),T(ce)!=="svelte-50j04k"&&(ce.textContent=ct),We=l(S),w(W.$$.fragment,S),S.forEach(o),ke=l(m),k=d(m,"DIV",{class:!0});var ae=j(k);w(q.$$.fragment,ae),Ve=l(ae),de=d(ae,"P",{"data-svelte-h":!0}),T(de)!=="svelte-n1oyuv"&&(de.innerHTML=dt),ae.forEach(o),Be=l(m),J=d(m,"DIV",{class:!0});var I=j(J);w(Q.$$.fragment,I),Ee=l(I),fe=d(I,"P",{"data-svelte-h":!0}),T(fe)!=="svelte-10jaql7"&&(fe.textContent=ft),Ne=l(I),w(V.$$.fragment,I),Re=l(I),w(B.$$.fragment,I),I.forEach(o),Xe=l(m),E=d(m,"DIV",{class:!0});var oe=j(E);w(A.$$.fragment,oe),Fe=l(oe),me=d(oe,"P",{"data-svelte-h":!0}),T(me)!=="svelte-1lh0nh5"&&(me.innerHTML=mt),oe.forEach(o),ze=l(m),M=d(m,"DIV",{class:!0});var D=j(M);w(O.$$.fragment,D),Ye=l(D),ue=d(D,"P",{"data-svelte-h":!0}),T(ue)!=="svelte-e03q3e"&&(ue.innerHTML=ut),He=l(D),w(N.$$.fragment,D),qe=l(D),w(R.$$.fragment,D),D.forEach(o),Qe=l(m),X=d(m,"DIV",{class:!0});var Ie=j(X);w(K.$$.fragment,Ie),Ae=l(Ie),ge=d(Ie,"P",{"data-svelte-h":!0}),T(ge)!=="svelte-1vfte1e"&&(ge.innerHTML=gt),Ie.forEach(o),Oe=l(m),F=d(m,"DIV",{class:!0});var De=j(F);w(ee.$$.fragment,De),Ke=l(De),he=d(De,"P",{"data-svelte-h":!0}),T(he)!=="svelte-16q0ax1"&&(he.textContent=ht),De.forEach(o),m.forEach(o),xe=l(e),w(te.$$.fragment,e),Te=l(e),C=d(e,"DIV",{class:!0});var je=j(C);w(ne.$$.fragment,je),et=l(je),be=d(je,"P",{"data-svelte-h":!0}),T(be)!=="svelte-1qpjiuf"&&(be.textContent=bt),je.forEach(o),Ue=l(e),w(se.$$.fragment,e),Je=l(e),we=d(e,"P",{}),j(we).forEach(o),this.h()},h(){L(n,"name","hf:doc:metadata"),L(n,"content",Lt),L(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(f,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),L(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,p){s(document.head,n),u(e,b,p),u(e,r,p),u(e,a,p),y(g,e,p),u(e,t,p),u(e,h,p),u(e,ye,p),y(G,e,p),u(e,ve,p),y(z,e,p),u(e,$e,p),u(e,f,p),y(Y,f,null),s(f,Le),s(f,ie),s(f,Pe),s(f,le),s(f,Ce),s(f,re),s(f,Se),s(f,pe),s(f,Ze),s(f,P),y(H,P,null),s(P,Ge),s(P,ce),s(P,We),y(W,P,null),s(f,ke),s(f,k),y(q,k,null),s(k,Ve),s(k,de),s(f,Be),s(f,J),y(Q,J,null),s(J,Ee),s(J,fe),s(J,Ne),y(V,J,null),s(J,Re),y(B,J,null),s(f,Xe),s(f,E),y(A,E,null),s(E,Fe),s(E,me),s(f,ze),s(f,M),y(O,M,null),s(M,Ye),s(M,ue),s(M,He),y(N,M,null),s(M,qe),y(R,M,null),s(f,Qe),s(f,X),y(K,X,null),s(X,Ae),s(X,ge),s(f,Oe),s(f,F),y(ee,F,null),s(F,Ke),s(F,he),u(e,xe,p),y(te,e,p),u(e,Te,p),u(e,C,p),y(ne,C,null),s(C,et),s(C,be),u(e,Ue,p),y(se,e,p),u(e,Je,p),u(e,we,p),Me=!0},p(e,[p]){const m={};p&2&&(m.$$scope={dirty:p,ctx:e}),G.$set(m);const S={};p&2&&(S.$$scope={dirty:p,ctx:e}),W.$set(S);const ae={};p&2&&(ae.$$scope={dirty:p,ctx:e}),V.$set(ae);const I={};p&2&&(I.$$scope={dirty:p,ctx:e}),B.$set(I);const oe={};p&2&&(oe.$$scope={dirty:p,ctx:e}),N.$set(oe);const D={};p&2&&(D.$$scope={dirty:p,ctx:e}),R.$set(D)},i(e){Me||(v(g.$$.fragment,e),v(G.$$.fragment,e),v(z.$$.fragment,e),v(Y.$$.fragment,e),v(H.$$.fragment,e),v(W.$$.fragment,e),v(q.$$.fragment,e),v(Q.$$.fragment,e),v(V.$$.fragment,e),v(B.$$.fragment,e),v(A.$$.fragment,e),v(O.$$.fragment,e),v(N.$$.fragment,e),v(R.$$.fragment,e),v(K.$$.fragment,e),v(ee.$$.fragment,e),v(te.$$.fragment,e),v(ne.$$.fragment,e),v(se.$$.fragment,e),Me=!0)},o(e){$(g.$$.fragment,e),$(G.$$.fragment,e),$(z.$$.fragment,e),$(Y.$$.fragment,e),$(H.$$.fragment,e),$(W.$$.fragment,e),$(q.$$.fragment,e),$(Q.$$.fragment,e),$(V.$$.fragment,e),$(B.$$.fragment,e),$(A.$$.fragment,e),$(O.$$.fragment,e),$(N.$$.fragment,e),$(R.$$.fragment,e),$(K.$$.fragment,e),$(ee.$$.fragment,e),$(te.$$.fragment,e),$(ne.$$.fragment,e),$(se.$$.fragment,e),Me=!1},d(e){e&&(o(b),o(r),o(a),o(t),o(h),o(ye),o(ve),o($e),o(f),o(xe),o(Te),o(C),o(Ue),o(Je),o(we)),o(n),x(g,e),x(G,e),x(z,e),x(Y),x(H),x(W),x(q),x(Q),x(V),x(B),x(A),x(O),x(N),x(R),x(K),x(ee),x(te,e),x(ne),x(se,e)}}}const Lt='{"title":"Latent upscaler","local":"latent-upscaler","sections":[{"title":"StableDiffusionLatentUpscalePipeline","local":"diffusers.StableDiffusionLatentUpscalePipeline","sections":[],"depth":2},{"title":"StableDiffusionPipelineOutput","local":"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput","sections":[],"depth":2}],"depth":1}';function Pt(U){return wt(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Bt extends yt{constructor(n){super(),vt(this,n,Pt,jt,_t,{})}}export{Bt as component}; | |
Xet Storage Details
- Size:
- 39.2 kB
- Xet hash:
- ec6cd59e425f4a7fcd16fc00ebc38d3c5450aeee10e53f78f0317535a73ae9fc
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.