# Self-Attention Guidance

[Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://huggingface.co/papers/2210.00939) is by Susung Hong et al.

The abstract from the paper is:

*Denoising diffusion models (DDMs) have attracted attention for their exceptional generation quality and diversity. This success is largely attributed to the use of class- or text-conditional diffusion guidance methods, such as classifier and classifier-free guidance. In this paper, we present a more comprehensive perspective that goes beyond the traditional guidance methods. From this generalized perspective, we introduce novel condition- and training-free strategies to enhance the quality of generated images. As a simple solution, blur guidance improves the suitability of intermediate samples for their fine-scale information and structures, enabling diffusion models to generate higher quality samples with a moderate guidance scale. Improving upon this, Self-Attention Guidance (SAG) uses the intermediate self-attention maps of diffusion models to enhance their stability and efficacy. Specifically, SAG adversarially blurs only the regions that diffusion models attend to at each iteration and guides them accordingly. Our experimental results show that our SAG improves the performance of various diffusion models, including ADM, IDDPM, Stable Diffusion, and DiT. Moreover, combining SAG with conventional guidance methods leads to further improvement.*

You can find additional information about Self-Attention Guidance on the [project page](https://ku-cvlab.github.io/Self-Attention-Guidance), [original codebase](https://github.com/KU-CVLAB/Self-Attention-Guidance), and try it out in a [demo](https://huggingface.co/spaces/susunghong/Self-Attention-Guidance) or [notebook](https://colab.research.google.com/github/SusungHong/Self-Attention-Guidance/blob/main/SAG_Stable.ipynb).

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>
## StableDiffusionSAGPipeline

### class diffusers.StableDiffusionSAGPipeline

( vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection] = None, requires_safety_checker: bool = True )

Pipeline for text-to-image generation using Stable Diffusion.

This model inherits from `DiffusionPipeline`. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.).

The pipeline also inherits the following loading methods (a usage sketch follows the parameter list below):

- `load_textual_inversion()` for loading textual inversion embeddings
- `load_ip_adapter()` for loading IP Adapters

Parameters:

- **vae** (`AutoencoderKL`) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`CLIPTextModel`) — Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
- **tokenizer** (`CLIPTokenizer`) — A `CLIPTokenizer` to tokenize text.
- **unet** (`UNet2DConditionModel`) — A `UNet2DConditionModel` to denoise the encoded image latents.
- **scheduler** (`SchedulerMixin`) — A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of `DDIMScheduler`, `LMSDiscreteScheduler`, or `PNDMScheduler`.
- **safety_checker** (`StableDiffusionSafetyChecker`) — Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms.
- **feature_extractor** (`CLIPImageProcessor`) — A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
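A brief sketch of the inherited loaders in use, assuming the `pipe` object from the component-reuse sketch above; the repository IDs, subfolder, and weight file name below are the commonly published loader examples and are assumptions here, not values taken from this page:

```py
>>> # Textual inversion: adds the learned <cat-toy> token to the tokenizer/text encoder.
>>> pipe.load_textual_inversion("sd-concepts-library/cat-toy")

>>> # IP-Adapter: loads image-prompt conditioning weights into the UNet.
>>> pipe.load_ip_adapter(
...     "h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin"
... )
```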
#### __call__

( prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, sag_scale: float = 0.75, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None )

The call function to the pipeline for generation.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
- **height** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`) — The height in pixels of the generated image.
- **width** (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`) — The width in pixels of the generated image.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher-quality image at the expense of slower inference.
- **guidance_scale** (`float`, *optional*, defaults to 7.5) — A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- **sag_scale** (`float`, *optional*, defaults to 0.75) — Strength of the self-attention guidance, chosen between [0, 1.0] for better quality; a value of 0 disables SAG (a fixed-seed comparison sketch follows the example below).
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **eta** (`float`, *optional*, defaults to 0.0) — Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the `DDIMScheduler`, and is ignored in other schedulers.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
- **ip_adapter_image** (`PipelineImageInput`, *optional*) — Optional image input to work with IP Adapters.
- **ip_adapter_image_embeds** (`List[torch.Tensor]`, *optional*) — Pre-generated image embeddings for IP-Adapter. If not provided, embeddings are computed from the `ip_adapter_image` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `StableDiffusionPipelineOutput` instead of a plain tuple.
- **callback** (`Callable`, *optional*) — A function that is called every `callback_steps` steps during inference with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
- **callback_steps** (`int`, *optional*, defaults to 1) — The frequency at which the `callback` function is called. If not specified, the callback is called at every step.
- **cross_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means the output of the pre-final layer is used for computing the prompt embeddings.
Returns: `StableDiffusionPipelineOutput` or `tuple`

If `return_dict` is `True`, `StableDiffusionPipelineOutput` is returned; otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content.

Examples:

```py
>>> import torch
>>> from diffusers import StableDiffusionSAGPipeline

>>> pipe = StableDiffusionSAGPipeline.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")

>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt, sag_scale=0.75).images[0]
```
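To see what `sag_scale` contributes, here is a sketch (continuing from the example above) that compares SAG strengths under a fixed seed, so any difference comes from the guidance rather than sampling noise:

```py
>>> # sag_scale=0.0 disables self-attention guidance; re-seeding the generator
>>> # keeps the initial latents identical across the two calls.
>>> generator = torch.Generator("cuda").manual_seed(0)
>>> baseline = pipe(prompt, sag_scale=0.0, generator=generator).images[0]

>>> generator = torch.Generator("cuda").manual_seed(0)
>>> guided = pipe(prompt, sag_scale=0.75, generator=generator).images[0]
```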
#### encode_prompt

( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None )

Encodes the prompt into text encoder hidden states.

Parameters:

- **prompt** (`str` or `List[str]`, *optional*) — Prompt to be encoded.
- **device** (`torch.device`) — Torch device.
- **num_images_per_prompt** (`int`) — Number of images that should be generated per prompt.
- **do_classifier_free_guidance** (`bool`) — Whether to use classifier-free guidance or not.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input argument.
- **lora_scale** (`float`, *optional*) — A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
- **clip_skip** (`int`, *optional*) — Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means the output of the pre-final layer is used for computing the prompt embeddings.
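`encode_prompt` is useful for computing embeddings once and reusing them across calls. A sketch under the same model assumptions as the examples above; that `encode_prompt` returns a `(prompt_embeds, negative_prompt_embeds)` pair matches current diffusers pipelines, but treat the exact return shape as an assumption, and the negative prompt text is illustrative:

```py
>>> # Pre-compute embeddings once...
>>> prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
...     prompt="a photo of an astronaut riding a horse on mars",
...     device=pipe.device,
...     num_images_per_prompt=1,
...     do_classifier_free_guidance=True,
...     negative_prompt="low quality, blurry",
... )

>>> # ...then reuse them instead of re-encoding the text on every call.
>>> image = pipe(
...     prompt_embeds=prompt_embeds,
...     negative_prompt_embeds=negative_prompt_embeds,
...     sag_scale=0.75,
... ).images[0]
```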
## StableDiffusionOutput

### class diffusers.pipelines.stable_diffusion.StableDiffusionPipelineOutput

( images: Union[List[PIL.Image.Image], np.ndarray], nsfw_content_detected: Optional[List[bool]] )

Output class for Stable Diffusion pipelines.

Parameters:

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) — List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`.
- **nsfw_content_detected** (`List[bool]`) — List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content, or `None` if safety checking could not be performed.
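In practice, the output class is consumed by pairing each image with its nsfw flag. A short sketch under the same assumptions as the examples above (the output file name is illustrative):

```py
>>> output = pipe(prompt, sag_scale=0.75)  # StableDiffusionPipelineOutput
>>> # nsfw_content_detected can be None when safety checking was not performed.
>>> flags = output.nsfw_content_detected or [False] * len(output.images)
>>> for i, (image, flagged) in enumerate(zip(output.images, flags)):
...     if not flagged:
...         image.save(f"sag_{i}.png")
```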
