Buckets:
| import{s as tn,o as nn,n as Li}from"../chunks/scheduler.8c3d61f6.js";import{S as on,i as sn,g as s,s as i,r as m,A as rn,h as r,f as o,c as n,j as D,u,x as f,k as x,y as t,a as l,v as g,d as _,t as h,w as b,m as an,n as dn}from"../chunks/index.da70eac4.js";import{T as Yi}from"../chunks/Tip.1d9b8c37.js";import{D as T}from"../chunks/Docstring.9419aa1d.js";import{C as en}from"../chunks/CodeBlock.a9c4becf.js";import{E as Ki}from"../chunks/ExampleCodeBlock.1b2603c3.js";import{H as it,E as ln}from"../chunks/getInferenceSnippets.39110341.js";function pn($){let d,E='You can find additional information about LEDITS++ on the <a href="https://leditsplusplus-project.static.hf.space/index.html" rel="nofollow">project page</a> and try it out in a <a href="https://huggingface.co/spaces/editing-images/leditsplusplus" rel="nofollow">demo</a>.';return{c(){d=s("p"),d.innerHTML=E},l(L){d=r(L,"P",{"data-svelte-h":!0}),f(d)!=="svelte-rdvyhm"&&(d.innerHTML=E)},m(L,w){l(L,d,w)},p:Li,d(L){L&&o(d)}}}function cn($){let d;return{c(){d=an(`Due to some backward compatibility issues with the current diffusers implementation of [DPMSolverMultistepScheduler](/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler) this implementation of LEdits++ can no longer guarantee perfect inversion. | |
| This issue is unlikely to have any noticeable effects on applied use-cases. However, we provide an alternative implementation that guarantees perfect inversion in a dedicated [GitHub repo](https://github.com/ml-research/ledits_pp).`)},l(E){d=dn(E,`Due to some backward compatibility issues with the current diffusers implementation of [DPMSolverMultistepScheduler](/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler) this implementation of LEdits++ can no longer guarantee perfect inversion. | |
| This issue is unlikely to have any noticeable effects on applied use-cases. However, we provide an alternative implementation that guarantees perfect inversion in a dedicated [GitHub repo](https://github.com/ml-research/ledits_pp).`)},m(E,L){l(E,d,L)},d(E){E&&o(d)}}}function fn($){let d,E="Examples:",L,w,k;return w=new en({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTEVkaXRzUFBQaXBlbGluZVN0YWJsZURpZmZ1c2lvbiUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMExFZGl0c1BQUGlwZWxpbmVTdGFibGVEaWZmdXNpb24uZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMnJ1bndheW1sJTJGc3RhYmxlLWRpZmZ1c2lvbi12MS01JTIyJTJDJTIwdmFyaWFudCUzRCUyMmZwMTYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBcGlwZS5lbmFibGVfdmFlX3RpbGluZygpJTBBcGlwZSUyMCUzRCUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEElMEFpbWdfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZ3d3cuYWltbC5pbmZvcm1hdGlrLnR1LWRhcm1zdGFkdC5kZSUyRnBlb3BsZSUyRm1icmFjayUyRmNoZXJyeV9ibG9zc29tLnBuZyUyMiUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZShpbWdfdXJsKS5yZXNpemUoKDUxMiUyQyUyMDUxMikpJTBBJTBBXyUyMCUzRCUyMHBpcGUuaW52ZXJ0KGltYWdlJTNEaW1hZ2UlMkMlMjBudW1faW52ZXJzaW9uX3N0ZXBzJTNENTAlMkMlMjBza2lwJTNEMC4xKSUwQSUwQWVkaXRlZF9pbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwZWRpdGluZ19wcm9tcHQlM0QlNUIlMjJjaGVycnklMjBibG9zc29tJTIyJTVEJTJDJTIwZWRpdF9ndWlkYW5jZV9zY2FsZSUzRDEwLjAlMkMlMjBlZGl0X3RocmVzaG9sZCUzRDAuNzUlMEEpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LEditsPPPipelineStableDiffusion | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = LEditsPPPipelineStableDiffusion.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"runwayml/stable-diffusion-v1-5"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_vae_tiling() | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png"</span> | |
| <span class="hljs-meta">>>> </span>image = load_image(img_url).resize((<span class="hljs-number">512</span>, <span class="hljs-number">512</span>)) | |
| <span class="hljs-meta">>>> </span>_ = pipe.invert(image=image, num_inversion_steps=<span class="hljs-number">50</span>, skip=<span class="hljs-number">0.1</span>) | |
| <span class="hljs-meta">>>> </span>edited_image = pipe( | |
| <span class="hljs-meta">... </span> editing_prompt=[<span class="hljs-string">"cherry blossom"</span>], edit_guidance_scale=<span class="hljs-number">10.0</span>, edit_threshold=<span class="hljs-number">0.75</span> | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){d=s("p"),d.textContent=E,L=i(),m(w.$$.fragment)},l(c){d=r(c,"P",{"data-svelte-h":!0}),f(d)!=="svelte-kvfsh7"&&(d.textContent=E),L=n(c),u(w.$$.fragment,c)},m(c,S){l(c,d,S),l(c,L,S),g(w,c,S),k=!0},p:Li,i(c){k||(_(w.$$.fragment,c),k=!0)},o(c){h(w.$$.fragment,c),k=!1},d(c){c&&(o(d),o(L)),b(w,c)}}}function mn($){let d,E="Examples:",L,w,k;return w=new en({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwTEVkaXRzUFBQaXBlbGluZVN0YWJsZURpZmZ1c2lvblhMJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwTEVkaXRzUFBQaXBlbGluZVN0YWJsZURpZmZ1c2lvblhMLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJzdGFiaWxpdHlhaSUyRnN0YWJsZS1kaWZmdXNpb24teGwtYmFzZS0xLjAlMjIlMkMlMjB2YXJpYW50JTNEJTIyZnAxNiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFwaXBlLmVuYWJsZV92YWVfdGlsaW5nKCklMEFwaXBlJTIwJTNEJTIwcGlwZS50byglMjJjdWRhJTIyKSUwQSUwQWltZ191cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnd3dy5haW1sLmluZm9ybWF0aWsudHUtZGFybXN0YWR0LmRlJTJGcGVvcGxlJTJGbWJyYWNrJTJGdGVubmlzLmpwZyUyMiUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZShpbWdfdXJsKS5yZXNpemUoKDEwMjQlMkMlMjAxMDI0KSklMEElMEFfJTIwJTNEJTIwcGlwZS5pbnZlcnQoaW1hZ2UlM0RpbWFnZSUyQyUyMG51bV9pbnZlcnNpb25fc3RlcHMlM0Q1MCUyQyUyMHNraXAlM0QwLjIpJTBBJTBBZWRpdGVkX2ltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBlZGl0aW5nX3Byb21wdCUzRCU1QiUyMnRlbm5pcyUyMGJhbGwlMjIlMkMlMjAlMjJ0b21hdG8lMjIlNUQlMkMlMEElMjAlMjAlMjAlMjByZXZlcnNlX2VkaXRpbmdfZGlyZWN0aW9uJTNEJTVCVHJ1ZSUyQyUyMEZhbHNlJTVEJTJDJTBBJTIwJTIwJTIwJTIwZWRpdF9ndWlkYW5jZV9zY2FsZSUzRCU1QjUuMCUyQyUyMDEwLjAlNUQlMkMlMEElMjAlMjAlMjAlMjBlZGl0X3RocmVzaG9sZCUzRCU1QjAuOSUyQyUyMDAuODUlNUQlMkMlMEEpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> LEditsPPPipelineStableDiffusionXL | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"stabilityai/stable-diffusion-xl-base-1.0"</span>, variant=<span class="hljs-string">"fp16"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.enable_vae_tiling() | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg"</span> | |
| <span class="hljs-meta">>>> </span>image = load_image(img_url).resize((<span class="hljs-number">1024</span>, <span class="hljs-number">1024</span>)) | |
| <span class="hljs-meta">>>> </span>_ = pipe.invert(image=image, num_inversion_steps=<span class="hljs-number">50</span>, skip=<span class="hljs-number">0.2</span>) | |
| <span class="hljs-meta">>>> </span>edited_image = pipe( | |
| <span class="hljs-meta">... </span> editing_prompt=[<span class="hljs-string">"tennis ball"</span>, <span class="hljs-string">"tomato"</span>], | |
| <span class="hljs-meta">... </span> reverse_editing_direction=[<span class="hljs-literal">True</span>, <span class="hljs-literal">False</span>], | |
| <span class="hljs-meta">... </span> edit_guidance_scale=[<span class="hljs-number">5.0</span>, <span class="hljs-number">10.0</span>], | |
| <span class="hljs-meta">... </span> edit_threshold=[<span class="hljs-number">0.9</span>, <span class="hljs-number">0.85</span>], | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){d=s("p"),d.textContent=E,L=i(),m(w.$$.fragment)},l(c){d=r(c,"P",{"data-svelte-h":!0}),f(d)!=="svelte-kvfsh7"&&(d.textContent=E),L=n(c),u(w.$$.fragment,c)},m(c,S){l(c,d,S),l(c,L,S),g(w,c,S),k=!0},p:Li,i(c){k||(_(w.$$.fragment,c),k=!0)},o(c){h(w.$$.fragment,c),k=!1},d(c){c&&(o(d),o(L)),b(w,c)}}}function un($){let d,E,L,w,k,c,S,yi='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>',nt,K,wi='LEDITS++ was proposed in <a href="https://huggingface.co/papers/2311.16711" rel="nofollow">LEDITS++: Limitless Image Editing using Text-to-Image Models</a> by Manuel Brack, Felix Friedrich, Katharina Kornmeier, Linoy Tsaban, Patrick Schramowski, Kristian Kersting, Apolinário Passos.',ot,ee,xi="The abstract from the paper is:",st,te,Di='<em>Text-to-image diffusion models have recently received increasing interest for their astonishing ability to produce high-fidelity images from solely text inputs. Subsequent research efforts aim to exploit and apply their capabilities to real image editing. However, existing image-to-image methods are often inefficient, imprecise, and of limited versatility. They either require time-consuming fine-tuning, deviate unnecessarily strongly from the input image, and/or lack support for multiple, simultaneous edits. To address these issues, we introduce LEDITS++, an efficient yet versatile and precise textual image manipulation technique. LEDITS++‘s novel inversion approach requires no tuning nor optimization and produces high-fidelity results with a few diffusion steps. Second, our methodology supports multiple simultaneous edits and is architecture-agnostic. Third, we use a novel implicit masking technique that limits changes to relevant image regions. We propose the novel TEdBench++ benchmark as part of our exhaustive evaluation. Our results demonstrate the capabilities of LEDITS++ and its improvements over previous methods. The project page is available at <a href="https://leditsplusplus-project.static.hf.space" rel="nofollow">https://leditsplusplus-project.static.hf.space</a> .</em>',rt,X,at,U,dt,ie,Ei="We provide two distinct pipelines based on different pre-trained models.",lt,ne,pt,v,oe,Ct,Se,Ti="Pipeline for textual image editing using LEDits++ with Stable Diffusion.",Nt,$e,ki=`This model inherits from <a href="/docs/diffusers/pr_11660/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a> and builds on the <a href="/docs/diffusers/pr_11660/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline">StableDiffusionPipeline</a>. Check the superclass | |
| documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular | |
| device, etc.).`,jt,I,se,Xt,Ie,Si=`The call function to the pipeline for editing. The | |
| <a href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.LEditsPPPipelineStableDiffusion.invert">invert()</a> method has to be called beforehand. Edits will | |
| always be performed for the last inverted image(s).`,Ut,z,zt,J,re,Jt,Me,$i=`The function to the pipeline for image inversion as described by the <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>. If the scheduler is set to <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a> the | |
| inversion proposed by <a href="https://huggingface.co/papers/2304.06140" rel="nofollow">edit-friendly DPDM</a> will be performed instead.`,At,A,ae,Wt,Ce,Ii=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Zt,W,de,Ot,Ne,Mi=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Gt,Z,le,Vt,je,Ci=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Bt,O,pe,Rt,Xe,Ni=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,Ft,G,ce,qt,Ue,ji="Encodes the prompt into text encoder hidden states.",ct,fe,ft,p,me,Ht,ze,Xi="Pipeline for textual image editing using LEDits++ with Stable Diffusion XL.",Qt,Je,Ui=`This model inherits from <a href="/docs/diffusers/pr_11660/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a> and builds on the <a href="/docs/diffusers/pr_11660/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline">StableDiffusionXLPipeline</a>. Check the | |
| superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a | |
| particular device, etc.).`,Yt,Ae,zi="In addition the pipeline inherits the following loading methods:",Kt,We,Ji='<li><em>LoRA</em>: <a href="/docs/diffusers/pr_11660/en/api/loaders/lora#diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights">LEditsPPPipelineStableDiffusionXL.load_lora_weights()</a></li> <li><em>Ckpt</em>: <a href="/docs/diffusers/pr_11660/en/api/loaders/single_file#diffusers.loaders.FromSingleFileMixin.from_single_file">loaders.FromSingleFileMixin.from_single_file()</a></li>',ei,Ze,Ai="as well as the following saving methods:",ti,Oe,Wi="<li><em>LoRA</em>: <code>loaders.StableDiffusionXLPipeline.save_lora_weights</code></li>",ii,M,ue,ni,Ge,Zi=`The call function to the pipeline for editing. The | |
| <a href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.LEditsPPPipelineStableDiffusionXL.invert">invert()</a> method has to be called beforehand. Edits | |
| will always be performed for the last inverted image(s).`,oi,V,si,B,ge,ri,Ve,Oi=`The function to the pipeline for image inversion as described by the <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>. If the scheduler is set to <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a> the | |
| inversion proposed by <a href="https://huggingface.co/papers/2304.06140" rel="nofollow">edit-friendly DPDM</a> will be performed instead.`,ai,R,_e,di,Be,Gi=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,li,F,he,pi,Re,Vi=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ci,q,be,fi,Fe,Bi=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,mi,H,Pe,ui,qe,Ri=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,gi,Q,ve,_i,He,Fi="Encodes the prompt into text encoder hidden states.",hi,Y,Le,bi,Qe,qi='See <a href="https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298" rel="nofollow">https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298</a>',mt,ye,ut,C,we,Pi,Ye,Hi="Output class for LEdits++ Diffusion pipelines.",gt,xe,_t,N,De,vi,Ke,Qi="Output class for LEdits++ Diffusion pipelines.",ht,Ee,bt,tt,Pt;return k=new it({props:{title:"LEDITS++",local:"ledits",headingTag:"h1"}}),X=new Yi({props:{$$slots:{default:[pn]},$$scope:{ctx:$}}}),U=new Yi({props:{warning:!0,$$slots:{default:[cn]},$$scope:{ctx:$}}}),ne=new it({props:{title:"LEditsPPPipelineStableDiffusion",local:"diffusers.LEditsPPPipelineStableDiffusion",headingTag:"h2"}}),oe=new T({props:{name:"class diffusers.LEditsPPPipelineStableDiffusion",anchor:"diffusers.LEditsPPPipelineStableDiffusion",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": typing.Union[diffusers.schedulers.scheduling_ddim.DDIMScheduler, diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler]"},{name:"safety_checker",val:": StableDiffusionSafetyChecker"},{name:"feature_extractor",val:": CLIPImageProcessor"},{name:"requires_safety_checker",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusion.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11660/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIPTextModel</a>) — | |
| Frozen text-encoder. Stable Diffusion uses the text portion of | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.unet",description:'<strong>unet</strong> (<a href="/docs/diffusers/pr_11660/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) — Conditional U-Net architecture to denoise the encoded image latents.',name:"unet"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a> or <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latens. Can be one of | |
| <a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a> or <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>. If any other scheduler is passed it will | |
| automatically be set to <a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a>.`,name:"scheduler"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.safety_checker",description:`<strong>safety_checker</strong> (<code>StableDiffusionSafetyChecker</code>) — | |
| Classification module that estimates whether generated images could be considered offensive or harmful. | |
| Please, refer to the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" rel="nofollow">model card</a> for details.`,name:"safety_checker"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.feature_extractor",description:`<strong>feature_extractor</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor" rel="nofollow">CLIPImageProcessor</a>) — | |
| Model that extracts features from generated images to be used as inputs for the <code>safety_checker</code>.`,name:"feature_extractor"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L269"}}),se=new T({props:{name:"__call__",anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__",parameters:[{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"editing_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"editing_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"reverse_editing_direction",val:": typing.Union[bool, typing.List[bool], NoneType] = False"},{name:"edit_guidance_scale",val:": typing.Union[float, typing.List[float], NoneType] = 5"},{name:"edit_warmup_steps",val:": typing.Union[int, typing.List[int], NoneType] = 0"},{name:"edit_cooldown_steps",val:": typing.Union[int, typing.List[int], NoneType] = None"},{name:"edit_threshold",val:": typing.Union[float, typing.List[float], NoneType] = 0.9"},{name:"user_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"sem_guidance",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"use_cross_attn_mask",val:": bool = False"},{name:"use_intersect_mask",val:": bool = True"},{name:"attn_store_steps",val:": typing.Optional[typing.List[int]] = []"},{name:"store_averaged_over_steps",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored | |
| if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <a href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPDiffusionPipelineOutput">LEditsPPDiffusionPipelineOutput</a> instead of a plain | |
| tuple.`,name:"return_dict"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.editing_prompt",description:`<strong>editing_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. The image is reconstructed by setting | |
| <code>editing_prompt = None</code>. Guidance direction of prompt should be specified via | |
| <code>reverse_editing_direction</code>.`,name:"editing_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.editing_prompt_embeds",description:`<strong>editing_prompt_embeds</strong> (<code>torch.Tensor></code>, <em>optional</em>) — | |
| Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should | |
| be specified via <code>reverse_editing_direction</code>.`,name:"editing_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If | |
| not provided, <code>negative_prompt_embeds</code> are generated from the <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.reverse_editing_direction",description:`<strong>reverse_editing_direction</strong> (<code>bool</code> or <code>List[bool]</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether the corresponding prompt in <code>editing_prompt</code> should be increased or decreased.`,name:"reverse_editing_direction"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.edit_guidance_scale",description:`<strong>edit_guidance_scale</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 5) — | |
| Guidance scale for guiding the image generation. If provided as list values should correspond to | |
| <code>editing_prompt</code>. <code>edit_guidance_scale</code> is defined as <code>s_e</code> of equation 12 of <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>.`,name:"edit_guidance_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.edit_warmup_steps",description:`<strong>edit_warmup_steps</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 10) — | |
| Number of diffusion steps (for each prompt) for which guidance will not be applied.`,name:"edit_warmup_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.edit_cooldown_steps",description:`<strong>edit_cooldown_steps</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| Number of diffusion steps (for each prompt) after which guidance will no longer be applied.`,name:"edit_cooldown_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.edit_threshold",description:`<strong>edit_threshold</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 0.9) — | |
| Masking threshold of guidance. Threshold should be proportional to the image region that is modified. | |
| ‘edit_threshold’ is defined as ‘λ’ of equation 12 of <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>.`,name:"edit_threshold"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.user_mask",description:`<strong>user_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| User-provided mask for even better control over the editing process. This is helpful when LEDITS++‘s | |
| implicit masks do not meet user preferences.`,name:"user_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.sem_guidance",description:`<strong>sem_guidance</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| List of pre-generated guidance vectors to be applied at generation. Length of the list has to | |
| correspond to <code>num_inference_steps</code>.`,name:"sem_guidance"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.use_cross_attn_mask",description:`<strong>use_cross_attn_mask</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask | |
| is set to true. Cross-attention masks are defined as ‘M^1’ of equation 12 of <a href="https://huggingface.co/papers/2311.16711" rel="nofollow">LEDITS++ | |
| paper</a>.`,name:"use_cross_attn_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.use_intersect_mask",description:`<strong>use_intersect_mask</strong> (<code>bool</code>, defaults to <code>True</code>) — | |
| Whether the masking term is calculated as intersection of cross-attention masks and masks derived from | |
| the noise estimate. Cross-attention mask are defined as ‘M^1’ and masks derived from the noise estimate | |
| are defined as ‘M^2’ of equation 12 of <a href="https://huggingface.co/papers/2311.16711" rel="nofollow">LEDITS++ paper</a>.`,name:"use_intersect_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.attn_store_steps",description:`<strong>attn_store_steps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.`,name:"attn_store_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.store_averaged_over_steps",description:`<strong>store_averaged_over_steps</strong> (<code>bool</code>, defaults to <code>True</code>) — | |
| Whether the attention maps for the ‘attn_store_steps’ are stored averaged over the diffusion steps. If | |
| False, attention maps for each step are stores separately. Just for visualization purposes.`,name:"store_averaged_over_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow"><code>self.processor</code></a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Guidance rescale factor from <a href="https://huggingface.co/papers/2305.08891" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a>. Guidance rescale factor should fix overexposure when | |
| using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L749",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPDiffusionPipelineOutput" | |
| >LEditsPPDiffusionPipelineOutput</a> if <code>return_dict</code> is True, otherwise a <code>tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of </code>bool<code>s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the </code>safety_checker\`.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPDiffusionPipelineOutput" | |
| >LEditsPPDiffusionPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),z=new Ki({props:{anchor:"diffusers.LEditsPPPipelineStableDiffusion.__call__.example",$$slots:{default:[fn]},$$scope:{ctx:$}}}),re=new T({props:{name:"invert",anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]"},{name:"source_prompt",val:": str = ''"},{name:"source_guidance_scale",val:": float = 3.5"},{name:"num_inversion_steps",val:": int = 30"},{name:"skip",val:": float = 0.15"},{name:"generator",val:": typing.Optional[torch._C.Generator] = None"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"resize_mode",val:": typing.Optional[str] = 'default'"},{name:"crops_coords",val:": typing.Optional[typing.Tuple[int, int, int, int]] = None"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.image",description:`<strong>image</strong> (<code>PipelineImageInput</code>) — | |
| Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect | |
| ratio.`,name:"image"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.source_prompt",description:`<strong>source_prompt</strong> (<code>str</code>, defaults to <code>""</code>) — | |
| Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled | |
| if the <code>source_prompt</code> is <code>""</code>.`,name:"source_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.source_guidance_scale",description:`<strong>source_guidance_scale</strong> (<code>float</code>, defaults to <code>3.5</code>) — | |
| Strength of guidance during inversion.`,name:"source_guidance_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.num_inversion_steps",description:`<strong>num_inversion_steps</strong> (<code>int</code>, defaults to <code>30</code>) — | |
| Number of total performed inversion steps after discarding the initial <code>skip</code> steps.`,name:"num_inversion_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.skip",description:`<strong>skip</strong> (<code>float</code>, defaults to <code>0.15</code>) — | |
| Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values | |
| will lead to stronger changes to the input image. <code>skip</code> has to be between <code>0</code> and <code>1</code>.`,name:"skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) — | |
| A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make inversion | |
| deterministic.`,name:"generator"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow"><code>self.processor</code></a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The height in preprocessed image. If <code>None</code>, will use the <code>get_default_height_width()</code> to get default | |
| height.`,name:"height"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.width",description:"<strong>width</strong> (<code>int</code>, <em>optional</em><code>, defaults to </code>None<code>) -- The width in preprocessed. If </code>None<code>, will use get_default_height_width()</code> to get the default width.",name:"width"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.resize_mode",description:`<strong>resize_mode</strong> (<code>str</code>, <em>optional</em>, defaults to <code>default</code>) — | |
| The resize mode, can be one of <code>default</code> or <code>fill</code>. If <code>default</code>, will resize the image to fit within | |
| the specified width and height, and it may not maintaining the original aspect ratio. If <code>fill</code>, will | |
| resize the image to fit within the specified width and height, maintaining the aspect ratio, and then | |
| center the image within the dimensions, filling empty with data from image. If <code>crop</code>, will resize the | |
| image to fit within the specified width and height, maintaining the aspect ratio, and then center the | |
| image within the dimensions, cropping the excess. Note that resize_mode <code>fill</code> and <code>crop</code> are only | |
| supported for PIL image input.`,name:"resize_mode"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.invert.crops_coords",description:`<strong>crops_coords</strong> (<code>List[Tuple[int, int, int, int]]</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The crop coordinates for each image in the batch. If <code>None</code>, will not crop the image.`,name:"crops_coords"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L1253",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>Output will contain the resized input image(s) | |
| and respective VAE reconstruction(s).</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPInversionPipelineOutput" | |
| >LEditsPPInversionPipelineOutput</a></p> | |
| `}}),ae=new T({props:{name:"disable_vae_slicing",anchor:"diffusers.LEditsPPPipelineStableDiffusion.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L727"}}),de=new T({props:{name:"disable_vae_tiling",anchor:"diffusers.LEditsPPPipelineStableDiffusion.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L742"}}),le=new T({props:{name:"enable_vae_slicing",anchor:"diffusers.LEditsPPPipelineStableDiffusion.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L720"}}),pe=new T({props:{name:"enable_vae_tiling",anchor:"diffusers.LEditsPPPipelineStableDiffusion.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L734"}}),ce=new T({props:{name:"encode_prompt",anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt",parameters:[{name:"device",val:""},{name:"num_images_per_prompt",val:""},{name:"enable_edit_guidance",val:""},{name:"negative_prompt",val:" = None"},{name:"editing_prompt",val:" = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"editing_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.enable_edit_guidance",description:`<strong>enable_edit_guidance</strong> (<code>bool</code>) — | |
| whether to perform any editing or reconstruct the input image instead`,name:"enable_edit_guidance"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.editing_prompt",description:`<strong>editing_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| Editing prompt(s) to be encoded. If not defined, one has to pass <code>editing_prompt_embeds</code> instead.`,name:"editing_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.editing_prompt_embeds",description:`<strong>editing_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"editing_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusion.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py#L521"}}),fe=new it({props:{title:"LEditsPPPipelineStableDiffusionXL",local:"diffusers.LEditsPPPipelineStableDiffusionXL",headingTag:"h2"}}),me=new T({props:{name:"class diffusers.LEditsPPPipelineStableDiffusionXL",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL",parameters:[{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"text_encoder_2",val:": CLIPTextModelWithProjection"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"tokenizer_2",val:": CLIPTokenizer"},{name:"unet",val:": UNet2DConditionModel"},{name:"scheduler",val:": typing.Union[diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, diffusers.schedulers.scheduling_ddim.DDIMScheduler]"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"},{name:"force_zeros_for_empty_prompt",val:": bool = True"},{name:"add_watermarker",val:": typing.Optional[bool] = None"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11660/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.text_encoder",description:`<strong>text_encoder</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIPTextModel</a>) — | |
| Frozen text-encoder. Stable Diffusion XL uses the text portion of | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.text_encoder_2",description:`<strong>text_encoder_2</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow">CLIPTextModelWithProjection</a>) — | |
| Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection" rel="nofollow">CLIP</a>, | |
| specifically the | |
| <a href="https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" rel="nofollow">laion/CLIP-ViT-bigG-14-laion2B-39B-b160k</a> | |
| variant.`,name:"text_encoder_2"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.tokenizer",description:`<strong>tokenizer</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.tokenizer_2",description:`<strong>tokenizer_2</strong> (<a href="https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer_2"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.unet",description:'<strong>unet</strong> (<a href="/docs/diffusers/pr_11660/en/api/models/unet2d-cond#diffusers.UNet2DConditionModel">UNet2DConditionModel</a>) — Conditional U-Net architecture to denoise the encoded image latents.',name:"unet"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a> or <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>) — | |
| A scheduler to be used in combination with <code>unet</code> to denoise the encoded image latens. Can be one of | |
| <a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a> or <a href="/docs/diffusers/pr_11660/en/api/schedulers/ddim#diffusers.DDIMScheduler">DDIMScheduler</a>. If any other scheduler is passed it will | |
| automatically be set to <a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a>.`,name:"scheduler"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.force_zeros_for_empty_prompt",description:`<strong>force_zeros_for_empty_prompt</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>"True"</code>) — | |
| Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of | |
| <code>stabilityai/stable-diffusion-xl-base-1-0</code>.`,name:"force_zeros_for_empty_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.add_watermarker",description:`<strong>add_watermarker</strong> (<code>bool</code>, <em>optional</em>) — | |
| Whether to use the <a href="https://github.com/ShieldMnt/invisible-watermark/" rel="nofollow">invisible_watermark library</a> to | |
| watermark output images. If not defined, it will default to True if the package is installed, otherwise no | |
| watermarker will be used.`,name:"add_watermarker"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L274"}}),ue=new T({props:{name:"__call__",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__",parameters:[{name:"denoising_end",val:": typing.Optional[float] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"negative_prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_pooled_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"guidance_rescale",val:": float = 0.0"},{name:"crops_coords_top_left",val:": typing.Tuple[int, int] = (0, 0)"},{name:"target_size",val:": typing.Optional[typing.Tuple[int, int]] = None"},{name:"editing_prompt",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"editing_prompt_embeddings",val:": typing.Optional[torch.Tensor] = None"},{name:"editing_pooled_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"reverse_editing_direction",val:": typing.Union[bool, typing.List[bool], NoneType] = False"},{name:"edit_guidance_scale",val:": typing.Union[float, typing.List[float], NoneType] = 5"},{name:"edit_warmup_steps",val:": typing.Union[int, typing.List[int], NoneType] = 0"},{name:"edit_cooldown_steps",val:": typing.Union[int, typing.List[int], NoneType] = None"},{name:"edit_threshold",val:": typing.Union[float, typing.List[float], NoneType] = 0.9"},{name:"sem_guidance",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"use_cross_attn_mask",val:": bool = False"},{name:"use_intersect_mask",val:": bool = False"},{name:"user_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"attn_store_steps",val:": typing.Optional[typing.List[int]] = []"},{name:"store_averaged_over_steps",val:": bool = True"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.denoising_end",description:`<strong>denoising_end</strong> (<code>float</code>, <em>optional</em>) — | |
| When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be | |
| completed before it is intentionally prematurely terminated. As a result, the returned sample will | |
| still retain a substantial amount of noise as determined by the discrete timesteps selected by the | |
| scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a | |
| “Mixture of Denoisers” multi-pipeline setup, as elaborated in [**Refining the Image`,name:"denoising_end"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and | |
| <code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code> | |
| input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.ip_adapter_image",description:`<strong>ip_adapter_image</strong> — (<code>PipelineImageInput</code>, <em>optional</em>): | |
| Optional image input to work with IP Adapters.`,name:"ip_adapter_image"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput</code> instead | |
| of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that will be called every <code>callback_steps</code> steps during inference. The function will be | |
| called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be | |
| called at every step.`,name:"callback_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.guidance_rescale",description:`<strong>guidance_rescale</strong> (<code>float</code>, <em>optional</em>, defaults to 0.7) — | |
| Guidance rescale factor proposed by <a href="https://huggingface.co/papers/2305.08891" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a> <code>guidance_scale</code> is defined as <code>φ</code> in equation 16. of | |
| <a href="https://huggingface.co/papers/2305.08891" rel="nofollow">Common Diffusion Noise Schedules and Sample Steps are | |
| Flawed</a>. Guidance rescale factor should fix overexposure when | |
| using zero terminal SNR.`,name:"guidance_rescale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.crops_coords_top_left",description:`<strong>crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) — | |
| <code>crops_coords_top_left</code> can be used to generate an image that appears to be “cropped” from the position | |
| <code>crops_coords_top_left</code> downwards. Favorable, well-centered images are usually achieved by setting | |
| <code>crops_coords_top_left</code> to (0, 0). Part of SDXL’s micro-conditioning as explained in section 2.2 of | |
| <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"crops_coords_top_left"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.target_size",description:`<strong>target_size</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (1024, 1024)) — | |
| For most cases, <code>target_size</code> should be set to the desired height and width of the generated image. If | |
| not specified it will default to <code>(width, height)</code>. Part of SDXL’s micro-conditioning as explained in | |
| section 2.2 of <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"target_size"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.editing_prompt",description:`<strong>editing_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. The image is reconstructed by setting | |
| <code>editing_prompt = None</code>. Guidance direction of prompt should be specified via | |
| <code>reverse_editing_direction</code>.`,name:"editing_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.editing_prompt_embeddings",description:`<strong>editing_prompt_embeddings</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated edit text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, editing_prompt_embeddings will be generated from <code>editing_prompt</code> input argument.`,name:"editing_prompt_embeddings"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.editing_pooled_prompt_embeddings",description:`<strong>editing_pooled_prompt_embeddings</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated pooled edit text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, editing_prompt_embeddings will be generated from <code>editing_prompt</code> input | |
| argument.`,name:"editing_pooled_prompt_embeddings"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.reverse_editing_direction",description:`<strong>reverse_editing_direction</strong> (<code>bool</code> or <code>List[bool]</code>, <em>optional</em>, defaults to <code>False</code>) — | |
| Whether the corresponding prompt in <code>editing_prompt</code> should be increased or decreased.`,name:"reverse_editing_direction"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.edit_guidance_scale",description:`<strong>edit_guidance_scale</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 5) — | |
| Guidance scale for guiding the image generation. If provided as list values should correspond to | |
| <code>editing_prompt</code>. <code>edit_guidance_scale</code> is defined as <code>s_e</code> of equation 12 of <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>.`,name:"edit_guidance_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.edit_warmup_steps",description:`<strong>edit_warmup_steps</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 10) — | |
| Number of diffusion steps (for each prompt) for which guidance is not applied.`,name:"edit_warmup_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.edit_cooldown_steps",description:`<strong>edit_cooldown_steps</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| Number of diffusion steps (for each prompt) after which guidance is no longer applied.`,name:"edit_cooldown_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.edit_threshold",description:`<strong>edit_threshold</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 0.9) — | |
| Masking threshold of guidance. Threshold should be proportional to the image region that is modified. | |
| ‘edit_threshold’ is defined as ‘λ’ of equation 12 of <a href="https://huggingface.co/papers/2301.12247" rel="nofollow">LEDITS++ | |
| Paper</a>.`,name:"edit_threshold"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.sem_guidance",description:`<strong>sem_guidance</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| List of pre-generated guidance vectors to be applied at generation. Length of the list has to | |
| correspond to <code>num_inference_steps</code>.`,name:"sem_guidance"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.use_cross_attn_mask",description:`<strong>use_cross_attn_mask</strong> — | |
| Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask | |
| is set to true. Cross-attention masks are defined as ‘M^1’ of equation 12 of <a href="https://huggingface.co/papers/2311.16711" rel="nofollow">LEDITS++ | |
| paper</a>.`,name:"use_cross_attn_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.use_intersect_mask",description:`<strong>use_intersect_mask</strong> — | |
| Whether the masking term is calculated as intersection of cross-attention masks and masks derived from | |
| the noise estimate. Cross-attention mask are defined as ‘M^1’ and masks derived from the noise estimate | |
| are defined as ‘M^2’ of equation 12 of <a href="https://huggingface.co/papers/2311.16711" rel="nofollow">LEDITS++ paper</a>.`,name:"use_intersect_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.user_mask",description:`<strong>user_mask</strong> — | |
| User-provided mask for even better control over the editing process. This is helpful when LEDITS++‘s | |
| implicit masks do not meet user preferences.`,name:"user_mask"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.attn_store_steps",description:`<strong>attn_store_steps</strong> — | |
| Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.`,name:"attn_store_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.store_averaged_over_steps",description:`<strong>store_averaged_over_steps</strong> — | |
| Whether the attention maps for the ‘attn_store_steps’ are stored averaged over the diffusion steps. If | |
| False, attention maps for each step are stores separately. Just for visualization purposes.`,name:"store_averaged_over_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L822",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPDiffusionPipelineOutput" | |
| >LEditsPPDiffusionPipelineOutput</a> if <code>return_dict</code> is True, otherwise a \`tuple. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPDiffusionPipelineOutput" | |
| >LEditsPPDiffusionPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),V=new Ki({props:{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.__call__.example",$$slots:{default:[mn]},$$scope:{ctx:$}}}),ge=new T({props:{name:"invert",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]"},{name:"source_prompt",val:": str = ''"},{name:"source_guidance_scale",val:" = 3.5"},{name:"negative_prompt",val:": str = None"},{name:"negative_prompt_2",val:": str = None"},{name:"num_inversion_steps",val:": int = 50"},{name:"skip",val:": float = 0.15"},{name:"generator",val:": typing.Optional[torch._C.Generator] = None"},{name:"crops_coords_top_left",val:": typing.Tuple[int, int] = (0, 0)"},{name:"num_zero_noise_steps",val:": int = 3"},{name:"cross_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"resize_mode",val:": typing.Optional[str] = 'default'"},{name:"crops_coords",val:": typing.Optional[typing.Tuple[int, int, int, int]] = None"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.image",description:`<strong>image</strong> (<code>PipelineImageInput</code>) — | |
| Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect | |
| ratio.`,name:"image"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.source_prompt",description:`<strong>source_prompt</strong> (<code>str</code>, defaults to <code>""</code>) — | |
| Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled | |
| if the <code>source_prompt</code> is <code>""</code>.`,name:"source_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.source_guidance_scale",description:`<strong>source_guidance_scale</strong> (<code>float</code>, defaults to <code>3.5</code>) — | |
| Strength of guidance during inversion.`,name:"source_guidance_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and | |
| <code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.num_inversion_steps",description:`<strong>num_inversion_steps</strong> (<code>int</code>, defaults to <code>50</code>) — | |
| Number of total performed inversion steps after discarding the initial <code>skip</code> steps.`,name:"num_inversion_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.skip",description:`<strong>skip</strong> (<code>float</code>, defaults to <code>0.15</code>) — | |
| Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values | |
| will lead to stronger changes to the input image. <code>skip</code> has to be between <code>0</code> and <code>1</code>.`,name:"skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.generator",description:`<strong>generator</strong> (<code>torch.Generator</code>, <em>optional</em>) — | |
| A <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow"><code>torch.Generator</code></a> to make inversion | |
| deterministic.`,name:"generator"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.crops_coords_top_left",description:`<strong>crops_coords_top_left</strong> (<code>Tuple[int]</code>, <em>optional</em>, defaults to (0, 0)) — | |
| <code>crops_coords_top_left</code> can be used to generate an image that appears to be “cropped” from the position | |
| <code>crops_coords_top_left</code> downwards. Favorable, well-centered images are usually achieved by setting | |
| <code>crops_coords_top_left</code> to (0, 0). Part of SDXL’s micro-conditioning as explained in section 2.2 of | |
| <a href="https://huggingface.co/papers/2307.01952" rel="nofollow">https://huggingface.co/papers/2307.01952</a>.`,name:"crops_coords_top_left"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.num_zero_noise_steps",description:`<strong>num_zero_noise_steps</strong> (<code>int</code>, defaults to <code>3</code>) — | |
| Number of final diffusion steps that will not renoise the current image. If no steps are set to zero | |
| SD-XL in combination with <a href="/docs/diffusers/pr_11660/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler">DPMSolverMultistepScheduler</a> will produce noise artifacts.`,name:"num_zero_noise_steps"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.invert.cross_attention_kwargs",description:`<strong>cross_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"cross_attention_kwargs"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L1461",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>Output will contain the resized input image(s) | |
| and respective VAE reconstruction(s).</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_11660/en/api/pipelines/ledits_pp#diffusers.pipelines.LEditsPPInversionPipelineOutput" | |
| >LEditsPPInversionPipelineOutput</a></p> | |
| `}}),_e=new T({props:{name:"disable_vae_slicing",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L775"}}),he=new T({props:{name:"disable_vae_tiling",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L790"}}),be=new T({props:{name:"enable_vae_slicing",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L768"}}),Pe=new T({props:{name:"enable_vae_tiling",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L782"}}),ve=new T({props:{name:"encode_prompt",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt",parameters:[{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"negative_prompt",val:": typing.Optional[str] = None"},{name:"negative_prompt_2",val:": typing.Optional[str] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_pooled_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"lora_scale",val:": typing.Optional[float] = None"},{name:"clip_skip",val:": typing.Optional[int] = None"},{name:"enable_edit_guidance",val:": bool = True"},{name:"editing_prompt",val:": typing.Optional[str] = None"},{name:"editing_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"editing_pooled_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead.`,name:"negative_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and | |
| <code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in both text-encoders`,name:"negative_prompt_2"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code> | |
| input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.clip_skip",description:`<strong>clip_skip</strong> (<code>int</code>, <em>optional</em>) — | |
| Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that | |
| the output of the pre-final layer will be used for computing the prompt embeddings.`,name:"clip_skip"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.enable_edit_guidance",description:`<strong>enable_edit_guidance</strong> (<code>bool</code>) — | |
| Whether to guide towards an editing prompt or not.`,name:"enable_edit_guidance"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.editing_prompt",description:`<strong>editing_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| Editing prompt(s) to be encoded. If not defined and ‘enable_edit_guidance’ is True, one has to pass | |
| <code>editing_prompt_embeds</code> instead.`,name:"editing_prompt"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.editing_prompt_embeds",description:`<strong>editing_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated edit text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided and ‘enable_edit_guidance’ is True, editing_prompt_embeds will be generated from | |
| <code>editing_prompt</code> input argument.`,name:"editing_prompt_embeds"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.encode_prompt.editing_pooled_prompt_embeds",description:`<strong>editing_pooled_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated edit pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, pooled editing_pooled_prompt_embeds will be generated from <code>editing_prompt</code> | |
| input argument.`,name:"editing_pooled_prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L401"}}),Le=new T({props:{name:"get_guidance_scale_embedding",anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.get_guidance_scale_embedding",parameters:[{name:"w",val:": Tensor"},{name:"embedding_dim",val:": int = 512"},{name:"dtype",val:": dtype = torch.float32"}],parametersDescription:[{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.get_guidance_scale_embedding.w",description:`<strong>w</strong> (<code>torch.Tensor</code>) — | |
| Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.`,name:"w"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.get_guidance_scale_embedding.embedding_dim",description:`<strong>embedding_dim</strong> (<code>int</code>, <em>optional</em>, defaults to 512) — | |
| Dimension of the embeddings to generate.`,name:"embedding_dim"},{anchor:"diffusers.LEditsPPPipelineStableDiffusionXL.get_guidance_scale_embedding.dtype",description:`<strong>dtype</strong> (<code>torch.dtype</code>, <em>optional</em>, defaults to <code>torch.float32</code>) — | |
| Data type of the generated embeddings.`,name:"dtype"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py#L707",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>Embedding vectors with shape <code>(len(w), embedding_dim)</code>.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>torch.Tensor</code></p> | |
| `}}),ye=new it({props:{title:"LEditsPPDiffusionPipelineOutput",local:"diffusers.pipelines.LEditsPPDiffusionPipelineOutput",headingTag:"h2"}}),we=new T({props:{name:"class diffusers.pipelines.LEditsPPDiffusionPipelineOutput",anchor:"diffusers.pipelines.LEditsPPDiffusionPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"},{name:"nsfw_content_detected",val:": typing.Optional[typing.List[bool]]"}],parametersDescription:[{anchor:"diffusers.pipelines.LEditsPPDiffusionPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or NumPy array of shape <code>(batch_size, height, width, num_channels)</code>.`,name:"images"},{anchor:"diffusers.pipelines.LEditsPPDiffusionPipelineOutput.nsfw_content_detected",description:`<strong>nsfw_content_detected</strong> (<code>List[bool]</code>) — | |
| List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or | |
| <code>None</code> if safety checking could not be performed.`,name:"nsfw_content_detected"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_output.py#L10"}}),xe=new it({props:{title:"LEditsPPInversionPipelineOutput",local:"diffusers.pipelines.LEditsPPInversionPipelineOutput",headingTag:"h2"}}),De=new T({props:{name:"class diffusers.pipelines.LEditsPPInversionPipelineOutput",anchor:"diffusers.pipelines.LEditsPPInversionPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"},{name:"vae_reconstruction_images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"}],parametersDescription:[{anchor:"diffusers.pipelines.LEditsPPInversionPipelineOutput.input_images",description:`<strong>input_images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of the cropped and resized input images as PIL images of length <code>batch_size</code> or NumPy array of shape <code> (batch_size, height, width, num_channels)</code>.`,name:"input_images"},{anchor:"diffusers.pipelines.LEditsPPInversionPipelineOutput.vae_reconstruction_images",description:`<strong>vae_reconstruction_images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of VAE reconstruction of all input images as PIL images of length <code>batch_size</code> or NumPy array of shape | |
| <code> (batch_size, height, width, num_channels)</code>.`,name:"vae_reconstruction_images"}],source:"https://github.com/huggingface/diffusers/blob/vr_11660/src/diffusers/pipelines/ledits_pp/pipeline_output.py#L28"}}),Ee=new ln({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/ledits_pp.md"}}),{c(){d=s("meta"),E=i(),L=s("p"),w=i(),m(k.$$.fragment),c=i(),S=s("div"),S.innerHTML=yi,nt=i(),K=s("p"),K.innerHTML=wi,ot=i(),ee=s("p"),ee.textContent=xi,st=i(),te=s("p"),te.innerHTML=Di,rt=i(),m(X.$$.fragment),at=i(),m(U.$$.fragment),dt=i(),ie=s("p"),ie.textContent=Ei,lt=i(),m(ne.$$.fragment),pt=i(),v=s("div"),m(oe.$$.fragment),Ct=i(),Se=s("p"),Se.textContent=Ti,Nt=i(),$e=s("p"),$e.innerHTML=ki,jt=i(),I=s("div"),m(se.$$.fragment),Xt=i(),Ie=s("p"),Ie.innerHTML=Si,Ut=i(),m(z.$$.fragment),zt=i(),J=s("div"),m(re.$$.fragment),Jt=i(),Me=s("p"),Me.innerHTML=$i,At=i(),A=s("div"),m(ae.$$.fragment),Wt=i(),Ce=s("p"),Ce.innerHTML=Ii,Zt=i(),W=s("div"),m(de.$$.fragment),Ot=i(),Ne=s("p"),Ne.innerHTML=Mi,Gt=i(),Z=s("div"),m(le.$$.fragment),Vt=i(),je=s("p"),je.textContent=Ci,Bt=i(),O=s("div"),m(pe.$$.fragment),Rt=i(),Xe=s("p"),Xe.textContent=Ni,Ft=i(),G=s("div"),m(ce.$$.fragment),qt=i(),Ue=s("p"),Ue.textContent=ji,ct=i(),m(fe.$$.fragment),ft=i(),p=s("div"),m(me.$$.fragment),Ht=i(),ze=s("p"),ze.textContent=Xi,Qt=i(),Je=s("p"),Je.innerHTML=Ui,Yt=i(),Ae=s("p"),Ae.textContent=zi,Kt=i(),We=s("ul"),We.innerHTML=Ji,ei=i(),Ze=s("p"),Ze.textContent=Ai,ti=i(),Oe=s("ul"),Oe.innerHTML=Wi,ii=i(),M=s("div"),m(ue.$$.fragment),ni=i(),Ge=s("p"),Ge.innerHTML=Zi,oi=i(),m(V.$$.fragment),si=i(),B=s("div"),m(ge.$$.fragment),ri=i(),Ve=s("p"),Ve.innerHTML=Oi,ai=i(),R=s("div"),m(_e.$$.fragment),di=i(),Be=s("p"),Be.innerHTML=Gi,li=i(),F=s("div"),m(he.$$.fragment),pi=i(),Re=s("p"),Re.innerHTML=Vi,ci=i(),q=s("div"),m(be.$$.fragment),fi=i(),Fe=s("p"),Fe.textContent=Bi,mi=i(),H=s("div"),m(Pe.$$.fragment),ui=i(),qe=s("p"),qe.textContent=Ri,gi=i(),Q=s("div"),m(ve.$$.fragment),_i=i(),He=s("p"),He.textContent=Fi,hi=i(),Y=s("div"),m(Le.$$.fragment),bi=i(),Qe=s("p"),Qe.innerHTML=qi,mt=i(),m(ye.$$.fragment),ut=i(),C=s("div"),m(we.$$.fragment),Pi=i(),Ye=s("p"),Ye.textContent=Hi,gt=i(),m(xe.$$.fragment),_t=i(),N=s("div"),m(De.$$.fragment),vi=i(),Ke=s("p"),Ke.textContent=Qi,ht=i(),m(Ee.$$.fragment),bt=i(),tt=s("p"),this.h()},l(e){const a=rn("svelte-u9bgzb",document.head);d=r(a,"META",{name:!0,content:!0}),a.forEach(o),E=n(e),L=r(e,"P",{}),D(L).forEach(o),w=n(e),u(k.$$.fragment,e),c=n(e),S=r(e,"DIV",{class:!0,"data-svelte-h":!0}),f(S)!=="svelte-si9ct8"&&(S.innerHTML=yi),nt=n(e),K=r(e,"P",{"data-svelte-h":!0}),f(K)!=="svelte-ke65yv"&&(K.innerHTML=wi),ot=n(e),ee=r(e,"P",{"data-svelte-h":!0}),f(ee)!=="svelte-1cwsb16"&&(ee.textContent=xi),st=n(e),te=r(e,"P",{"data-svelte-h":!0}),f(te)!=="svelte-1jncrmh"&&(te.innerHTML=Di),rt=n(e),u(X.$$.fragment,e),at=n(e),u(U.$$.fragment,e),dt=n(e),ie=r(e,"P",{"data-svelte-h":!0}),f(ie)!=="svelte-1qwf4p0"&&(ie.textContent=Ei),lt=n(e),u(ne.$$.fragment,e),pt=n(e),v=r(e,"DIV",{class:!0});var y=D(v);u(oe.$$.fragment,y),Ct=n(y),Se=r(y,"P",{"data-svelte-h":!0}),f(Se)!=="svelte-i87xsw"&&(Se.textContent=Ti),Nt=n(y),$e=r(y,"P",{"data-svelte-h":!0}),f($e)!=="svelte-nigaej"&&($e.innerHTML=ki),jt=n(y),I=r(y,"DIV",{class:!0});var j=D(I);u(se.$$.fragment,j),Xt=n(j),Ie=r(j,"P",{"data-svelte-h":!0}),f(Ie)!=="svelte-83kf6p"&&(Ie.innerHTML=Si),Ut=n(j),u(z.$$.fragment,j),j.forEach(o),zt=n(y),J=r(y,"DIV",{class:!0});var Te=D(J);u(re.$$.fragment,Te),Jt=n(Te),Me=r(Te,"P",{"data-svelte-h":!0}),f(Me)!=="svelte-1ivbnjl"&&(Me.innerHTML=$i),Te.forEach(o),At=n(y),A=r(y,"DIV",{class:!0});var ke=D(A);u(ae.$$.fragment,ke),Wt=n(ke),Ce=r(ke,"P",{"data-svelte-h":!0}),f(Ce)!=="svelte-1s3c06i"&&(Ce.innerHTML=Ii),ke.forEach(o),Zt=n(y),W=r(y,"DIV",{class:!0});var vt=D(W);u(de.$$.fragment,vt),Ot=n(vt),Ne=r(vt,"P",{"data-svelte-h":!0}),f(Ne)!=="svelte-pkn4ui"&&(Ne.innerHTML=Mi),vt.forEach(o),Gt=n(y),Z=r(y,"DIV",{class:!0});var Lt=D(Z);u(le.$$.fragment,Lt),Vt=n(Lt),je=r(Lt,"P",{"data-svelte-h":!0}),f(je)!=="svelte-14bnrb6"&&(je.textContent=Ci),Lt.forEach(o),Bt=n(y),O=r(y,"DIV",{class:!0});var yt=D(O);u(pe.$$.fragment,yt),Rt=n(yt),Xe=r(yt,"P",{"data-svelte-h":!0}),f(Xe)!=="svelte-1xwrf7t"&&(Xe.textContent=Ni),yt.forEach(o),Ft=n(y),G=r(y,"DIV",{class:!0});var wt=D(G);u(ce.$$.fragment,wt),qt=n(wt),Ue=r(wt,"P",{"data-svelte-h":!0}),f(Ue)!=="svelte-16q0ax1"&&(Ue.textContent=ji),wt.forEach(o),y.forEach(o),ct=n(e),u(fe.$$.fragment,e),ft=n(e),p=r(e,"DIV",{class:!0});var P=D(p);u(me.$$.fragment,P),Ht=n(P),ze=r(P,"P",{"data-svelte-h":!0}),f(ze)!=="svelte-ojahy4"&&(ze.textContent=Xi),Qt=n(P),Je=r(P,"P",{"data-svelte-h":!0}),f(Je)!=="svelte-otlpi3"&&(Je.innerHTML=Ui),Yt=n(P),Ae=r(P,"P",{"data-svelte-h":!0}),f(Ae)!=="svelte-984r3o"&&(Ae.textContent=zi),Kt=n(P),We=r(P,"UL",{"data-svelte-h":!0}),f(We)!=="svelte-aurnck"&&(We.innerHTML=Ji),ei=n(P),Ze=r(P,"P",{"data-svelte-h":!0}),f(Ze)!=="svelte-19qtqre"&&(Ze.textContent=Ai),ti=n(P),Oe=r(P,"UL",{"data-svelte-h":!0}),f(Oe)!=="svelte-1tez18v"&&(Oe.innerHTML=Wi),ii=n(P),M=r(P,"DIV",{class:!0});var et=D(M);u(ue.$$.fragment,et),ni=n(et),Ge=r(et,"P",{"data-svelte-h":!0}),f(Ge)!=="svelte-62twut"&&(Ge.innerHTML=Zi),oi=n(et),u(V.$$.fragment,et),et.forEach(o),si=n(P),B=r(P,"DIV",{class:!0});var xt=D(B);u(ge.$$.fragment,xt),ri=n(xt),Ve=r(xt,"P",{"data-svelte-h":!0}),f(Ve)!=="svelte-1ivbnjl"&&(Ve.innerHTML=Oi),xt.forEach(o),ai=n(P),R=r(P,"DIV",{class:!0});var Dt=D(R);u(_e.$$.fragment,Dt),di=n(Dt),Be=r(Dt,"P",{"data-svelte-h":!0}),f(Be)!=="svelte-1s3c06i"&&(Be.innerHTML=Gi),Dt.forEach(o),li=n(P),F=r(P,"DIV",{class:!0});var Et=D(F);u(he.$$.fragment,Et),pi=n(Et),Re=r(Et,"P",{"data-svelte-h":!0}),f(Re)!=="svelte-pkn4ui"&&(Re.innerHTML=Vi),Et.forEach(o),ci=n(P),q=r(P,"DIV",{class:!0});var Tt=D(q);u(be.$$.fragment,Tt),fi=n(Tt),Fe=r(Tt,"P",{"data-svelte-h":!0}),f(Fe)!=="svelte-14bnrb6"&&(Fe.textContent=Bi),Tt.forEach(o),mi=n(P),H=r(P,"DIV",{class:!0});var kt=D(H);u(Pe.$$.fragment,kt),ui=n(kt),qe=r(kt,"P",{"data-svelte-h":!0}),f(qe)!=="svelte-1xwrf7t"&&(qe.textContent=Ri),kt.forEach(o),gi=n(P),Q=r(P,"DIV",{class:!0});var St=D(Q);u(ve.$$.fragment,St),_i=n(St),He=r(St,"P",{"data-svelte-h":!0}),f(He)!=="svelte-16q0ax1"&&(He.textContent=Fi),St.forEach(o),hi=n(P),Y=r(P,"DIV",{class:!0});var $t=D(Y);u(Le.$$.fragment,$t),bi=n($t),Qe=r($t,"P",{"data-svelte-h":!0}),f(Qe)!=="svelte-vo59ec"&&(Qe.innerHTML=qi),$t.forEach(o),P.forEach(o),mt=n(e),u(ye.$$.fragment,e),ut=n(e),C=r(e,"DIV",{class:!0});var It=D(C);u(we.$$.fragment,It),Pi=n(It),Ye=r(It,"P",{"data-svelte-h":!0}),f(Ye)!=="svelte-hu8uib"&&(Ye.textContent=Hi),It.forEach(o),gt=n(e),u(xe.$$.fragment,e),_t=n(e),N=r(e,"DIV",{class:!0});var Mt=D(N);u(De.$$.fragment,Mt),vi=n(Mt),Ke=r(Mt,"P",{"data-svelte-h":!0}),f(Ke)!=="svelte-hu8uib"&&(Ke.textContent=Qi),Mt.forEach(o),ht=n(e),u(Ee.$$.fragment,e),bt=n(e),tt=r(e,"P",{}),D(tt).forEach(o),this.h()},h(){x(d,"name","hf:doc:metadata"),x(d,"content",gn),x(S,"class","flex flex-wrap space-x-1"),x(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(O,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(M,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(B,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(p,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,a){t(document.head,d),l(e,E,a),l(e,L,a),l(e,w,a),g(k,e,a),l(e,c,a),l(e,S,a),l(e,nt,a),l(e,K,a),l(e,ot,a),l(e,ee,a),l(e,st,a),l(e,te,a),l(e,rt,a),g(X,e,a),l(e,at,a),g(U,e,a),l(e,dt,a),l(e,ie,a),l(e,lt,a),g(ne,e,a),l(e,pt,a),l(e,v,a),g(oe,v,null),t(v,Ct),t(v,Se),t(v,Nt),t(v,$e),t(v,jt),t(v,I),g(se,I,null),t(I,Xt),t(I,Ie),t(I,Ut),g(z,I,null),t(v,zt),t(v,J),g(re,J,null),t(J,Jt),t(J,Me),t(v,At),t(v,A),g(ae,A,null),t(A,Wt),t(A,Ce),t(v,Zt),t(v,W),g(de,W,null),t(W,Ot),t(W,Ne),t(v,Gt),t(v,Z),g(le,Z,null),t(Z,Vt),t(Z,je),t(v,Bt),t(v,O),g(pe,O,null),t(O,Rt),t(O,Xe),t(v,Ft),t(v,G),g(ce,G,null),t(G,qt),t(G,Ue),l(e,ct,a),g(fe,e,a),l(e,ft,a),l(e,p,a),g(me,p,null),t(p,Ht),t(p,ze),t(p,Qt),t(p,Je),t(p,Yt),t(p,Ae),t(p,Kt),t(p,We),t(p,ei),t(p,Ze),t(p,ti),t(p,Oe),t(p,ii),t(p,M),g(ue,M,null),t(M,ni),t(M,Ge),t(M,oi),g(V,M,null),t(p,si),t(p,B),g(ge,B,null),t(B,ri),t(B,Ve),t(p,ai),t(p,R),g(_e,R,null),t(R,di),t(R,Be),t(p,li),t(p,F),g(he,F,null),t(F,pi),t(F,Re),t(p,ci),t(p,q),g(be,q,null),t(q,fi),t(q,Fe),t(p,mi),t(p,H),g(Pe,H,null),t(H,ui),t(H,qe),t(p,gi),t(p,Q),g(ve,Q,null),t(Q,_i),t(Q,He),t(p,hi),t(p,Y),g(Le,Y,null),t(Y,bi),t(Y,Qe),l(e,mt,a),g(ye,e,a),l(e,ut,a),l(e,C,a),g(we,C,null),t(C,Pi),t(C,Ye),l(e,gt,a),g(xe,e,a),l(e,_t,a),l(e,N,a),g(De,N,null),t(N,vi),t(N,Ke),l(e,ht,a),g(Ee,e,a),l(e,bt,a),l(e,tt,a),Pt=!0},p(e,[a]){const y={};a&2&&(y.$$scope={dirty:a,ctx:e}),X.$set(y);const j={};a&2&&(j.$$scope={dirty:a,ctx:e}),U.$set(j);const Te={};a&2&&(Te.$$scope={dirty:a,ctx:e}),z.$set(Te);const ke={};a&2&&(ke.$$scope={dirty:a,ctx:e}),V.$set(ke)},i(e){Pt||(_(k.$$.fragment,e),_(X.$$.fragment,e),_(U.$$.fragment,e),_(ne.$$.fragment,e),_(oe.$$.fragment,e),_(se.$$.fragment,e),_(z.$$.fragment,e),_(re.$$.fragment,e),_(ae.$$.fragment,e),_(de.$$.fragment,e),_(le.$$.fragment,e),_(pe.$$.fragment,e),_(ce.$$.fragment,e),_(fe.$$.fragment,e),_(me.$$.fragment,e),_(ue.$$.fragment,e),_(V.$$.fragment,e),_(ge.$$.fragment,e),_(_e.$$.fragment,e),_(he.$$.fragment,e),_(be.$$.fragment,e),_(Pe.$$.fragment,e),_(ve.$$.fragment,e),_(Le.$$.fragment,e),_(ye.$$.fragment,e),_(we.$$.fragment,e),_(xe.$$.fragment,e),_(De.$$.fragment,e),_(Ee.$$.fragment,e),Pt=!0)},o(e){h(k.$$.fragment,e),h(X.$$.fragment,e),h(U.$$.fragment,e),h(ne.$$.fragment,e),h(oe.$$.fragment,e),h(se.$$.fragment,e),h(z.$$.fragment,e),h(re.$$.fragment,e),h(ae.$$.fragment,e),h(de.$$.fragment,e),h(le.$$.fragment,e),h(pe.$$.fragment,e),h(ce.$$.fragment,e),h(fe.$$.fragment,e),h(me.$$.fragment,e),h(ue.$$.fragment,e),h(V.$$.fragment,e),h(ge.$$.fragment,e),h(_e.$$.fragment,e),h(he.$$.fragment,e),h(be.$$.fragment,e),h(Pe.$$.fragment,e),h(ve.$$.fragment,e),h(Le.$$.fragment,e),h(ye.$$.fragment,e),h(we.$$.fragment,e),h(xe.$$.fragment,e),h(De.$$.fragment,e),h(Ee.$$.fragment,e),Pt=!1},d(e){e&&(o(E),o(L),o(w),o(c),o(S),o(nt),o(K),o(ot),o(ee),o(st),o(te),o(rt),o(at),o(dt),o(ie),o(lt),o(pt),o(v),o(ct),o(ft),o(p),o(mt),o(ut),o(C),o(gt),o(_t),o(N),o(ht),o(bt),o(tt)),o(d),b(k,e),b(X,e),b(U,e),b(ne,e),b(oe),b(se),b(z),b(re),b(ae),b(de),b(le),b(pe),b(ce),b(fe,e),b(me),b(ue),b(V),b(ge),b(_e),b(he),b(be),b(Pe),b(ve),b(Le),b(ye,e),b(we),b(xe,e),b(De),b(Ee,e)}}}const gn='{"title":"LEDITS++","local":"ledits","sections":[{"title":"LEditsPPPipelineStableDiffusion","local":"diffusers.LEditsPPPipelineStableDiffusion","sections":[],"depth":2},{"title":"LEditsPPPipelineStableDiffusionXL","local":"diffusers.LEditsPPPipelineStableDiffusionXL","sections":[],"depth":2},{"title":"LEditsPPDiffusionPipelineOutput","local":"diffusers.pipelines.LEditsPPDiffusionPipelineOutput","sections":[],"depth":2},{"title":"LEditsPPInversionPipelineOutput","local":"diffusers.pipelines.LEditsPPInversionPipelineOutput","sections":[],"depth":2}],"depth":1}';function _n($){return nn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class xn extends on{constructor(d){super(),sn(this,d,_n,un,tn,{})}}export{xn as component}; | |
Xet Storage Details
- Size:
- 98 kB
- Xet hash:
- 377b1e2f089aec526d95b4cd264697ca506f123fc81dad03e4c79aaae6f24cdb
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.