Buckets:

rtrm's picture
download
raw
38.5 kB
// NOTE(review): this is minified Svelte-compiled output for a Hugging Face docs page
// (the diffusers MultiDiffusion / StableDiffusionPanoramaPipeline API page — the file
// later references its markdown source, docs/source/en/api/pipelines/panorama.md).
// Do not hand-edit this generated bundle; regenerate it from the markdown source.
import{s as vt,o as wt,n as bt}from"../chunks/scheduler.8c3d61f6.js";import{S as Pt,i as yt,g as s,s as a,r as g,A as xt,h as r,f as n,c as o,j as M,x as d,u as h,k as $,y as l,a as i,v as _,d as b,t as v,w}from"../chunks/index.da70eac4.js";import{T as $t}from"../chunks/Tip.6f698f24.js";import{D as oe}from"../chunks/Docstring.634d8861.js";import{C as Dt}from"../chunks/CodeBlock.a9c4becf.js";import{E as Tt}from"../chunks/ExampleCodeBlock.f879b663.js";import{H as Ee,E as St}from"../chunks/getInferenceSnippets.ea1775db.js";
// kt: Svelte fragment factory for the schedulers-guide tip paragraph. Returns the
// standard compiled-fragment hooks: c (create), l (claim/hydrate), m (mount), p
// (update; bt is the compiler's noop here), d (destroy).
function kt(se){let p,D='Make sure to check out the Schedulers <a href="../../using-diffusers/schedulers">guide</a> to learn how to explore the tradeoff between scheduler speed and quality, and see the <a href="../../using-diffusers/loading#reuse-a-pipeline">reuse components across pipelines</a> section to learn how to efficiently load the same components into multiple pipelines.';return{c(){p=s("p"),p.innerHTML=D},l(f){p=r(f,"P",{"data-svelte-h":!0}),d(p)!=="svelte-1qn15hi"&&(p.innerHTML=D)},m(f,P){i(f,p,P)},p:bt,d(f){f&&n(p)}}}
// Mt: Svelte fragment factory for the "Examples:" paragraph followed by a CodeBlock
// component (Dt). The `code` prop is the URI-encoded-then-base64 form of the panorama
// pipeline usage snippet; `highlighted` is the same snippet pre-rendered as hljs HTML
// in the template literal below (its embedded newlines are part of the string — keep
// the line structure byte-for-byte).
function Mt(se){let p,D="Examples:",f,P,y;return P=new Dt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU3RhYmxlRGlmZnVzaW9uUGFub3JhbWFQaXBlbGluZSUyQyUyMERESU1TY2hlZHVsZXIlMEElMEFtb2RlbF9ja3B0JTIwJTNEJTIwJTIyc3RhYmlsaXR5YWklMkZzdGFibGUtZGlmZnVzaW9uLTItYmFzZSUyMiUwQXNjaGVkdWxlciUyMCUzRCUyMERESU1TY2hlZHVsZXIuZnJvbV9wcmV0cmFpbmVkKG1vZGVsX2NrcHQlMkMlMjBzdWJmb2xkZXIlM0QlMjJzY2hlZHVsZXIlMjIpJTBBcGlwZSUyMCUzRCUyMFN0YWJsZURpZmZ1c2lvblBhbm9yYW1hUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2NrcHQlMkMlMjBzY2hlZHVsZXIlM0RzY2hlZHVsZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBJTBBcGlwZSUyMCUzRCUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwcGhvdG8lMjBvZiUyMHRoZSUyMGRvbG9taXRlcyUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> StableDiffusionPanoramaPipeline, DDIMScheduler
<span class="hljs-meta">&gt;&gt;&gt; </span>model_ckpt = <span class="hljs-string">&quot;stabilityai/stable-diffusion-2-base&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder=<span class="hljs-string">&quot;scheduler&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = StableDiffusionPanoramaPipeline.from_pretrained(
<span class="hljs-meta">... </span> model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = pipe.to(<span class="hljs-string">&quot;cuda&quot;</span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;a photo of the dolomites&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){p=s("p"),p.textContent=D,f=a(),g(P.$$.fragment)},l(c){p=r(c,"P",{"data-svelte-h":!0}),d(p)!=="svelte-kvfsh7"&&(p.textContent=D),f=o(c),h(P.$$.fragment,c)},m(c,L){i(c,p,L),i(c,f,L),_(P,c,L),y=!0},p:bt,i(c){y||(b(P.$$.fragment,c),y=!0)},o(c){v(P.$$.fragment,c),y=!1},d(c){c&&(n(p),n(f)),w(P,c)}}}
// Lt: top-level fragment for the whole panorama documentation page (headings,
// docstring components, tips). Its definition continues beyond this chunk and is
// truncated in this view — left untouched past this point.
function Lt(se){let p,D,f,P,y,c="<p>This pipeline is deprecated but it can still be used. However, we won’t test the pipeline anymore and won’t accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model.</p>",L,z,he,I,et='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>',_e,E,tt='<a href="https://huggingface.co/papers/2302.08113" rel="nofollow">MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation</a> is by Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel.',be,N,nt="The abstract from the paper is:",ve,Z,it="<em>Recent advances in text-to-image generation with diffusion models present transformative capabilities in image quality. However, user controllability of the generated image, and fast adaptation to new tasks still remains an open challenge, currently mostly addressed by costly and long re-training and fine-tuning or ad-hoc adaptations to specific image generation tasks. In this work, we present MultiDiffusion, a unified framework that enables versatile and controllable image generation, using a pre-trained text-to-image diffusion model, without any further training or finetuning. At the center of our approach is a new generation process, based on an optimization task that binds together multiple diffusion generation processes with a shared set of parameters or constraints. 
We show that MultiDiffusion can be readily applied to generate high quality and diverse images that adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes.</em>",we,A,at='You can find additional information about MultiDiffusion on the <a href="https://multidiffusion.github.io/" rel="nofollow">project page</a>, <a href="https://github.com/omerbt/MultiDiffusion" rel="nofollow">original codebase</a>, and try it out in a <a href="https://huggingface.co/spaces/weizmannscience/MultiDiffusion" rel="nofollow">demo</a>.',Pe,V,ye,q,ot=`While calling <a href="/docs/diffusers/pr_12403/en/api/pipelines/panorama#diffusers.StableDiffusionPanoramaPipeline">StableDiffusionPanoramaPipeline</a>, it’s possible to specify the <code>view_batch_size</code> parameter to be &gt; 1.
For some GPUs with high performance, this can speedup the generation process and increase VRAM usage.`,xe,J,st="To generate panorama-like images make sure you pass the width parameter accordingly. We recommend a width value of 2048 which is the default.",$e,R,rt="Circular padding is applied to ensure there are no stitching artifacts when working with panoramas to ensure a seamless transition from the rightmost part to the leftmost part. By enabling circular padding (set <code>circular_padding=True</code>), the operation applies additional crops after the rightmost point of the image, allowing the model to “see” the transition from the rightmost part to the leftmost part. This helps maintain visual consistency in a 360-degree sense and creates a proper “panorama” that can be viewed using 360-degree panorama viewers. When decoding latents in Stable Diffusion, circular padding is applied to ensure that the decoded latents match in the RGB space.",De,B,lt=`For example, without circular padding, there is a stitching artifact (default):
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20no_circular_padding.png" alt="img"/>`,Te,F,pt=`But with circular padding, the right and the left parts are matching (<code>circular_padding=True</code>):
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20circular_padding.png" alt="img"/>`,Se,C,ke,W,Me,m,Y,Ne,T,Q,Ze,re,dt="The call function to the pipeline for generation.",Ae,U,Ve,x,X,qe,le,ct="Decode the given latents with padding for circular inference.",Je,pe,mt="Notes:",Re,de,ft="<li>The padding is added to remove boundary artifacts and improve the output quality.</li> <li>This would slightly increase the memory usage.</li> <li>The padding pixels are then removed from the decoded image.</li>",Be,H,K,Fe,ce,ut="Encodes the prompt into text encoder hidden states.",We,j,ee,Ye,me,gt='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',Qe,G,te,Xe,fe,ht=`Generates a list of views based on the given parameters. Here, we define the mappings F_i (see Eq. 7 in the
MultiDiffusion paper <a href="https://huggingface.co/papers/2302.08113" rel="nofollow">https://huggingface.co/papers/2302.08113</a>). If panorama’s height/width &lt; window_size,
num_blocks of height/width should return 1.`,Le,ne,Ie,S,ie,Ke,ue,_t="Output class for Stable Diffusion pipelines.",Ce,ae,Ue,ge,He;return z=new Ee({props:{title:"MultiDiffusion",local:"multidiffusion",headingTag:"h1"}}),V=new Ee({props:{title:"Tips",local:"tips",headingTag:"h2"}}),C=new $t({props:{$$slots:{default:[kt]},$$scope:{ctx:se}}}),W=new Ee({props:{title:"StableDiffusionPanoramaPipeline",local:"diffusers.StableDiffusionPanoramaPipeline",headingTag:"h2"}}),Y=new oe({props:{name:"class diffusers.StableDiffusionPanoramaPipeline",anchor:"diffusers.StableDiffusionPanoramaPipeline",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": DDIMScheduler"},{name:"safety_checker",val:": StableDiffusionSafetyChecker"},{name:"feature_extractor",val:": CLIPImageProcessor"},{name:"image_encoder",val:": typing.Optional[transformers.models.clip.modeling_clip.CLIPVisionModelWithProjection] = None"},{name:"requires_safety_checker",val:": bool = True"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L158"}}),Q=new oe({props:{name:"__call__",anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"height",val:": typing.Optional[int] = 512"},{name:"width",val:": typing.Optional[int] = 2048"},{name:"num_inference_steps",val:": int = 50"},{name:"timesteps",val:": typing.List[int] = None"},{name:"guidance_scale",val:": float = 7.5"},{name:"view_batch_size",val:": int = 1"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] 
= None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"circular_padding",val:": bool = False"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"**kwargs",val:": typing.Any"}],parametersDescription:[{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide image generation. If not defined, you need to pass <code>prompt_embeds</code>.`,name:"prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014;
The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to 2048) &#x2014;
The width in pixels of the generated image. The width is kept high because the pipeline is supposed
generate panorama-like images.`,name:"width"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
The timesteps at which to generate the images. If not specified, then the default timestep spacing
strategy of the scheduler is used.`,name:"timesteps"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.5) &#x2014;
A higher guidance scale value encourages the model to generate images closely linked to the text
<code>prompt</code> at the expense of lower image quality. Guidance scale is enabled when <code>guidance_scale &gt; 1</code>.`,name:"guidance_scale"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.view_batch_size",description:`<strong>view_batch_size</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The batch size to denoise split views. For some GPUs with high performance, higher view batch size can
speedup the generation and increase the VRAM usage.`,name:"view_batch_size"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (<code>guidance_scale &lt; 1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) from the <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">DDIM</a> paper. Only
applies to the <a href="/docs/diffusers/pr_12403/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>, and is ignored in other schedulers.`,name:"eta"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make
generation deterministic.`,name:"generator"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, <code>negative_prompt_embeds</code> are generated from the <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.ip_adapter_image",description:`<strong>ip_adapter_image</strong> &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>):
Optional image input to work with IP Adapters.`,name:"ip_adapter_image"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. It should
contain the negative image embedding if <code>do_classifier_free_guidance</code> is set to <code>True</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generated image. Choose between <code>PIL.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <a href="/docs/diffusers/pr_12403/en/api/pipelines/stable_diffusion/text2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput">StableDiffusionPipelineOutput</a> instead of a
plain tuple.`,name:"return_dict"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
A rescaling factor for the guidance embeddings. A value of 0.0 means no rescaling is applied.`,name:"guidance_rescale"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.circular_padding",description:`<strong>circular_padding</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>False</code>) &#x2014;
If set to <code>True</code>, circular padding is applied to ensure there are no stitching artifacts. Circular
padding allows the model to seamlessly generate a transition from the rightmost part of the image to
the leftmost part, maintaining consistency in a 360-degree sense.`,name:"circular_padding"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by
<code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List[str]</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L801",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If <code>return_dict</code> is <code>True</code>, <a
href="/docs/diffusers/pr_12403/en/api/pipelines/stable_diffusion/text2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput"
>StableDiffusionPipelineOutput</a> is returned,
otherwise a <code>tuple</code> is returned where the first element is a list with the generated images and the
second element is a list of <code>bool</code>s indicating whether the corresponding generated image contains
“not-safe-for-work” (nsfw) content.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><a
href="/docs/diffusers/pr_12403/en/api/pipelines/stable_diffusion/text2img#diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput"
>StableDiffusionPipelineOutput</a> or <code>tuple</code></p>
`}}),U=new Tt({props:{anchor:"diffusers.StableDiffusionPanoramaPipeline.__call__.example",$$slots:{default:[Mt]},$$scope:{ctx:se}}}),X=new oe({props:{name:"decode_latents_with_padding",anchor:"diffusers.StableDiffusionPanoramaPipeline.decode_latents_with_padding",parameters:[{name:"latents",val:": Tensor"},{name:"padding",val:": int = 8"}],parametersDescription:[{anchor:"diffusers.StableDiffusionPanoramaPipeline.decode_latents_with_padding.latents",description:"<strong>latents</strong> (torch.Tensor) &#x2014; The input latents to decode.",name:"latents"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.decode_latents_with_padding.padding",description:"<strong>padding</strong> (int, optional) &#x2014; The number of latents to add on each side for padding. Defaults to 8.",name:"padding"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L563",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>The decoded image with padding removed.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p>torch.Tensor</p>
`}}),K=new oe({props:{name:"encode_prompt",anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt",parameters:[{name:"prompt",val:""},{name:"device",val:""},{name:"num_images_per_prompt",val:""},{name:"do_classifier_free_guidance",val:""},{name:"negative_prompt",val:" = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>):
torch device`,name:"device"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) &#x2014;
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L283"}}),ee=new oe({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.StableDiffusionPanoramaPipeline.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) &#x2014;
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) &#x2014;
Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) &#x2014;
Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L701",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>torch.Tensor</code></p>
`}}),te=new oe({props:{name:"get_views",anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views",parameters:[{name:"panorama_height",val:": int"},{name:"panorama_width",val:": int"},{name:"window_size",val:": int = 64"},{name:"stride",val:": int = 8"},{name:"circular_padding",val:": bool = False"}],parametersDescription:[{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views.panorama_height",description:"<strong>panorama_height</strong> (int) &#x2014; The height of the panorama.",name:"panorama_height"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views.panorama_width",description:"<strong>panorama_width</strong> (int) &#x2014; The width of the panorama.",name:"panorama_width"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views.window_size",description:"<strong>window_size</strong> (int, optional) &#x2014; The size of the window. Defaults to 64.",name:"window_size"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views.stride",description:"<strong>stride</strong> (int, optional) &#x2014; The stride value. Defaults to 8.",name:"stride"},{anchor:"diffusers.StableDiffusionPanoramaPipeline.get_views.circular_padding",description:"<strong>circular_padding</strong> (bool, optional) &#x2014; Whether to apply circular padding. Defaults to False.",name:"circular_padding"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py#L731",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>A list of tuples representing the views. Each tuple contains four integers
representing the start and end coordinates of the window in the panorama.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p>List[Tuple[int, int, int, int]]</p>
`}}),ne=new Ee({props:{title:"StableDiffusionPipelineOutput",local:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",headingTag:"h2"}}),ie=new oe({props:{name:"class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"},{name:"nsfw_content_detected",val:": typing.Optional[typing.List[bool]]"}],parametersDescription:[{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) &#x2014;
List of denoised PIL images of length <code>batch_size</code> or NumPy array of shape <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) &#x2014;
List indicating whether the corresponding generated image contains &#x201C;not-safe-for-work&#x201D; (nsfw) content or
<code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/vr_12403/src/diffusers/pipelines/stable_diffusion/pipeline_output.py#L11"}}),ae=new St({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/panorama.md"}}),{c(){p=s("meta"),D=a(),f=s("p"),P=a(),y=s("blockquote"),y.innerHTML=c,L=a(),g(z.$$.fragment),he=a(),I=s("div"),I.innerHTML=et,_e=a(),E=s("p"),E.innerHTML=tt,be=a(),N=s("p"),N.textContent=nt,ve=a(),Z=s("p"),Z.innerHTML=it,we=a(),A=s("p"),A.innerHTML=at,Pe=a(),g(V.$$.fragment),ye=a(),q=s("p"),q.innerHTML=ot,xe=a(),J=s("p"),J.textContent=st,$e=a(),R=s("p"),R.innerHTML=rt,De=a(),B=s("p"),B.innerHTML=lt,Te=a(),F=s("p"),F.innerHTML=pt,Se=a(),g(C.$$.fragment),ke=a(),g(W.$$.fragment),Me=a(),m=s("div"),g(Y.$$.fragment),Ne=a(),T=s("div"),g(Q.$$.fragment),Ze=a(),re=s("p"),re.textContent=dt,Ae=a(),g(U.$$.fragment),Ve=a(),x=s("div"),g(X.$$.fragment),qe=a(),le=s("p"),le.textContent=ct,Je=a(),pe=s("p"),pe.textContent=mt,Re=a(),de=s("ul"),de.innerHTML=ft,Be=a(),H=s("div"),g(K.$$.fragment),Fe=a(),ce=s("p"),ce.textContent=ut,We=a(),j=s("div"),g(ee.$$.fragment),Ye=a(),me=s("p"),me.innerHTML=gt,Qe=a(),G=s("div"),g(te.$$.fragment),Xe=a(),fe=s("p"),fe.innerHTML=ht,Le=a(),g(ne.$$.fragment),Ie=a(),S=s("div"),g(ie.$$.fragment),Ke=a(),ue=s("p"),ue.textContent=_t,Ce=a(),g(ae.$$.fragment),Ue=a(),ge=s("p"),this.h()},l(e){const 
t=xt("svelte-u9bgzb",document.head);p=r(t,"META",{name:!0,content:!0}),t.forEach(n),D=o(e),f=r(e,"P",{}),M(f).forEach(n),P=o(e),y=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),d(y)!=="svelte-124tjg"&&(y.innerHTML=c),L=o(e),h(z.$$.fragment,e),he=o(e),I=r(e,"DIV",{class:!0,"data-svelte-h":!0}),d(I)!=="svelte-si9ct8"&&(I.innerHTML=et),_e=o(e),E=r(e,"P",{"data-svelte-h":!0}),d(E)!=="svelte-iubh1u"&&(E.innerHTML=tt),be=o(e),N=r(e,"P",{"data-svelte-h":!0}),d(N)!=="svelte-1cwsb16"&&(N.textContent=nt),ve=o(e),Z=r(e,"P",{"data-svelte-h":!0}),d(Z)!=="svelte-muikgy"&&(Z.innerHTML=it),we=o(e),A=r(e,"P",{"data-svelte-h":!0}),d(A)!=="svelte-nmxwil"&&(A.innerHTML=at),Pe=o(e),h(V.$$.fragment,e),ye=o(e),q=r(e,"P",{"data-svelte-h":!0}),d(q)!=="svelte-1ez7ykl"&&(q.innerHTML=ot),xe=o(e),J=r(e,"P",{"data-svelte-h":!0}),d(J)!=="svelte-10nxbu3"&&(J.textContent=st),$e=o(e),R=r(e,"P",{"data-svelte-h":!0}),d(R)!=="svelte-1mn2x9s"&&(R.innerHTML=rt),De=o(e),B=r(e,"P",{"data-svelte-h":!0}),d(B)!=="svelte-1etj7av"&&(B.innerHTML=lt),Te=o(e),F=r(e,"P",{"data-svelte-h":!0}),d(F)!=="svelte-qdh84m"&&(F.innerHTML=pt),Se=o(e),h(C.$$.fragment,e),ke=o(e),h(W.$$.fragment,e),Me=o(e),m=r(e,"DIV",{class:!0});var u=M(m);h(Y.$$.fragment,u),Ne=o(u),T=r(u,"DIV",{class:!0});var k=M(T);h(Q.$$.fragment,k),Ze=o(k),re=r(k,"P",{"data-svelte-h":!0}),d(re)!=="svelte-50j04k"&&(re.textContent=dt),Ae=o(k),h(U.$$.fragment,k),k.forEach(n),Ve=o(u),x=r(u,"DIV",{class:!0});var O=M(x);h(X.$$.fragment,O),qe=o(O),le=r(O,"P",{"data-svelte-h":!0}),d(le)!=="svelte-11enoo7"&&(le.textContent=ct),Je=o(O),pe=r(O,"P",{"data-svelte-h":!0}),d(pe)!=="svelte-1biq3pv"&&(pe.textContent=mt),Re=o(O),de=r(O,"UL",{"data-svelte-h":!0}),d(de)!=="svelte-mb20u9"&&(de.innerHTML=ft),O.forEach(n),Be=o(u),H=r(u,"DIV",{class:!0});var je=M(H);h(K.$$.fragment,je),Fe=o(je),ce=r(je,"P",{"data-svelte-h":!0}),d(ce)!=="svelte-16q0ax1"&&(ce.textContent=ut),je.forEach(n),We=o(u),j=r(u,"DIV",{class:!0});var 
Ge=M(j);h(ee.$$.fragment,Ge),Ye=o(Ge),me=r(Ge,"P",{"data-svelte-h":!0}),d(me)!=="svelte-vo59ec"&&(me.innerHTML=gt),Ge.forEach(n),Qe=o(u),G=r(u,"DIV",{class:!0});var Oe=M(G);h(te.$$.fragment,Oe),Xe=o(Oe),fe=r(Oe,"P",{"data-svelte-h":!0}),d(fe)!=="svelte-zs34v9"&&(fe.innerHTML=ht),Oe.forEach(n),u.forEach(n),Le=o(e),h(ne.$$.fragment,e),Ie=o(e),S=r(e,"DIV",{class:!0});var ze=M(S);h(ie.$$.fragment,ze),Ke=o(ze),ue=r(ze,"P",{"data-svelte-h":!0}),d(ue)!=="svelte-1qpjiuf"&&(ue.textContent=_t),ze.forEach(n),Ce=o(e),h(ae.$$.fragment,e),Ue=o(e),ge=r(e,"P",{}),M(ge).forEach(n),this.h()},h(){$(p,"name","hf:doc:metadata"),$(p,"content",It),$(y,"class","warning"),$(I,"class","flex flex-wrap space-x-1"),$(T,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(x,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(m,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),$(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 
mt-8")},m(e,t){l(document.head,p),i(e,D,t),i(e,f,t),i(e,P,t),i(e,y,t),i(e,L,t),_(z,e,t),i(e,he,t),i(e,I,t),i(e,_e,t),i(e,E,t),i(e,be,t),i(e,N,t),i(e,ve,t),i(e,Z,t),i(e,we,t),i(e,A,t),i(e,Pe,t),_(V,e,t),i(e,ye,t),i(e,q,t),i(e,xe,t),i(e,J,t),i(e,$e,t),i(e,R,t),i(e,De,t),i(e,B,t),i(e,Te,t),i(e,F,t),i(e,Se,t),_(C,e,t),i(e,ke,t),_(W,e,t),i(e,Me,t),i(e,m,t),_(Y,m,null),l(m,Ne),l(m,T),_(Q,T,null),l(T,Ze),l(T,re),l(T,Ae),_(U,T,null),l(m,Ve),l(m,x),_(X,x,null),l(x,qe),l(x,le),l(x,Je),l(x,pe),l(x,Re),l(x,de),l(m,Be),l(m,H),_(K,H,null),l(H,Fe),l(H,ce),l(m,We),l(m,j),_(ee,j,null),l(j,Ye),l(j,me),l(m,Qe),l(m,G),_(te,G,null),l(G,Xe),l(G,fe),i(e,Le,t),_(ne,e,t),i(e,Ie,t),i(e,S,t),_(ie,S,null),l(S,Ke),l(S,ue),i(e,Ce,t),_(ae,e,t),i(e,Ue,t),i(e,ge,t),He=!0},p(e,[t]){const u={};t&2&&(u.$$scope={dirty:t,ctx:e}),C.$set(u);const k={};t&2&&(k.$$scope={dirty:t,ctx:e}),U.$set(k)},i(e){He||(b(z.$$.fragment,e),b(V.$$.fragment,e),b(C.$$.fragment,e),b(W.$$.fragment,e),b(Y.$$.fragment,e),b(Q.$$.fragment,e),b(U.$$.fragment,e),b(X.$$.fragment,e),b(K.$$.fragment,e),b(ee.$$.fragment,e),b(te.$$.fragment,e),b(ne.$$.fragment,e),b(ie.$$.fragment,e),b(ae.$$.fragment,e),He=!0)},o(e){v(z.$$.fragment,e),v(V.$$.fragment,e),v(C.$$.fragment,e),v(W.$$.fragment,e),v(Y.$$.fragment,e),v(Q.$$.fragment,e),v(U.$$.fragment,e),v(X.$$.fragment,e),v(K.$$.fragment,e),v(ee.$$.fragment,e),v(te.$$.fragment,e),v(ne.$$.fragment,e),v(ie.$$.fragment,e),v(ae.$$.fragment,e),He=!1},d(e){e&&(n(D),n(f),n(P),n(y),n(L),n(he),n(I),n(_e),n(E),n(be),n(N),n(ve),n(Z),n(we),n(A),n(Pe),n(ye),n(q),n(xe),n(J),n($e),n(R),n(De),n(B),n(Te),n(F),n(Se),n(ke),n(Me),n(m),n(Le),n(Ie),n(S),n(Ce),n(Ue),n(ge)),n(p),w(z,e),w(V,e),w(C,e),w(W,e),w(Y),w(Q),w(U),w(X),w(K),w(ee),w(te),w(ne,e),w(ie),w(ae,e)}}}const 
It = '{"title":"MultiDiffusion","local":"multidiffusion","sections":[{"title":"Tips","local":"tips","sections":[],"depth":2},{"title":"StableDiffusionPanoramaPipeline","local":"diffusers.StableDiffusionPanoramaPipeline","sections":[],"depth":2},{"title":"StableDiffusionPipelineOutput","local":"diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput","sections":[],"depth":2}],"depth":1}';

// Instance-init function for the compiled page component.
// Schedules a callback (via the scheduler helper `wt`) that reads the
// "fw" query parameter from the current URL; the value is read and
// discarded here — presumably the read has a side effect elsewhere or
// is vestigial compiler output (NOTE(review): confirm).
// Returns the component's (empty) reactive context array.
function Ct(se) {
  wt(() => {
    new URLSearchParams(window.location.search).get("fw");
  });
  return [];
}

// Compiled Svelte page component: wires the instance function (Ct),
// the create-fragment function (Lt, defined earlier in this file) and
// the safe_not_equal comparator (vt) into the component base class (Pt).
class Nt extends Pt {
  constructor(p) {
    super();
    yt(this, p, Ct, Lt, vt, {});
  }
}

export { Nt as component };

Xet Storage Details

Size:
38.5 kB
·
Xet hash:
8640686a16d20333859de1cca8c0e2e35da3b5b7eb1d5773e6f9aada63056d73

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.