Buckets:

rtrm's picture
download
raw
39.6 kB
import{s as $t,o as Ct,n as kt}from"../chunks/scheduler.53228c21.js";import{S as Gt,i as Bt,e as s,s as o,c,h as Lt,a as r,d as n,b as a,f as fe,g as m,j as l,k as U,l as b,m as i,n as h,t as u,o as f,p as g}from"../chunks/index.100fac89.js";import{C as Zt}from"../chunks/CopyLLMTxtMenu.7aefc1a4.js";import{D as ot}from"../chunks/Docstring.d6cb35e8.js";import{C as le}from"../chunks/CodeBlock.d30a6509.js";import{E as Xt}from"../chunks/ExampleCodeBlock.a12c1377.js";import{H as at,E as Et}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.3722da43.js";function Ht(ge){let d,$="Examples:",y,w,M;return w=new le({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUGl4QXJ0QWxwaGFQaXBlbGluZSUwQSUwQSUyMyUyMFlvdSUyMGNhbiUyMHJlcGxhY2UlMjB0aGUlMjBjaGVja3BvaW50JTIwaWQlMjB3aXRoJTIwJTIyUGl4QXJ0LWFscGhhJTJGUGl4QXJ0LVhMLTItNTEyeDUxMiUyMiUyMHRvby4lMEFwaXBlJTIwJTNEJTIwUGl4QXJ0QWxwaGFQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyUGl4QXJ0LWFscGhhJTJGUGl4QXJ0LVhMLTItMTAyNC1NUyUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiklMEElMjMlMjBFbmFibGUlMjBtZW1vcnklMjBvcHRpbWl6YXRpb25zLiUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwc21hbGwlMjBjYWN0dXMlMjB3aXRoJTIwYSUyMGhhcHB5JTIwZmFjZSUyMGluJTIwdGhlJTIwU2FoYXJhJTIwZGVzZXJ0LiUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> PixArtAlphaPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># You can replace the checkpoint id with &quot;PixArt-alpha/PixArt-XL-2-512x512&quot; too.</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = PixArtAlphaPipeline.from_pretrained(<span class="hljs-string">&quot;PixArt-alpha/PixArt-XL-2-1024-MS&quot;</span>, torch_dtype=torch.float16)
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-comment"># Enable memory optimizations.</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;A small cactus with a happy face in the Sahara desert.&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){d=s("p"),d.textContent=$,y=o(),c(w.$$.fragment)},l(p){d=r(p,"P",{"data-svelte-h":!0}),l(d)!=="svelte-kvfsh7"&&(d.textContent=$),y=a(p),m(w.$$.fragment,p)},m(p,T){i(p,d,T),i(p,y,T),h(w,p,T),M=!0},p:kt,i(p){M||(u(w.$$.fragment,p),M=!0)},o(p){f(w.$$.fragment,p),M=!1},d(p){p&&(n(d),n(y)),g(w,p)}}}function Wt(ge){let d,$,y,w,M,p,T,_e,C,st='<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/pixart/header_collage.png"/>',be,k,rt='<a href="https://huggingface.co/papers/2310.00426" rel="nofollow">PixArt-α: Fast Training of Diffusion Transformer for Photorealistic Text-to-Image Synthesis</a> is Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, and Zhenguo Li.',xe,G,lt="The abstract from the paper is:",we,B,pt="<em>The most advanced text-to-image (T2I) models require significant training costs (e.g., millions of GPU hours), seriously hindering the fundamental innovation for the AIGC community while increasing CO2 emissions. This paper introduces PIXART-α, a Transformer-based T2I diffusion model whose image generation quality is competitive with state-of-the-art image generators (e.g., Imagen, SDXL, and even Midjourney), reaching near-commercial application standards. Additionally, it supports high-resolution image synthesis up to 1024px resolution with low training cost, as shown in Figure 1 and 2. To achieve this goal, three core designs are proposed: (1) Training strategy decomposition: We devise three distinct training steps that separately optimize pixel dependency, text-image alignment, and image aesthetic quality; (2) Efficient T2I Transformer: We incorporate cross-attention modules into Diffusion Transformer (DiT) to inject text conditions and streamline the computation-intensive class-condition branch; (3) High-informative data: We emphasize the significance of concept density in text-image pairs and leverage a large Vision-Language model to auto-label dense pseudo-captions to assist text-image alignment learning. As a result, PIXART-α’s training speed markedly surpasses existing large-scale T2I models, e.g., PIXART-α only takes 10.8% of Stable Diffusion v1.5’s training time (675 vs. 6,250 A100 GPU days), saving nearly $300,000 ($26,000 vs. $320,000) and reducing 90% CO2 emissions. Moreover, compared with a larger SOTA model, RAPHAEL, our training cost is merely 1%. Extensive experiments demonstrate that PIXART-α excels in image quality, artistry, and semantic control. We hope PIXART-α will provide new insights to the AIGC community and startups to accelerate building their own high-quality yet low-cost generative models from scratch.</em>",Me,L,dt='You can find the original codebase at <a href="https://github.com/PixArt-alpha/PixArt-alpha" rel="nofollow">PixArt-alpha/PixArt-alpha</a> and all the available checkpoints at <a href="https://huggingface.co/PixArt-alpha" rel="nofollow">PixArt-alpha</a>.',Te,Z,ct="Some notes about this pipeline:",ye,X,mt='<li>It uses a Transformer backbone (instead of a UNet) for denoising. As such it has a similar architecture as <a href="./dit">DiT</a>.</li> <li>It was trained using text conditions computed from T5. This aspect makes the pipeline better at following complex text prompts with intricate details.</li> <li>It is good at producing high-resolution images at different aspect ratios. To get the best results, the authors recommend some size brackets which can be found <a href="https://github.com/PixArt-alpha/PixArt-alpha/blob/08fbbd281ec96866109bdd2cdb75f2f58fb17610/diffusion/data/datasets/utils.py" rel="nofollow">here</a>.</li> <li>It rivals the quality of state-of-the-art text-to-image generation systems (as of this writing) such as Stable Diffusion XL, Imagen, and DALL-E 2, while being more efficient than them.</li>',ve,A,ht='<p>Make sure to check out the Schedulers <a href="../../using-diffusers/schedulers">guide</a> to learn how to explore the tradeoff between scheduler speed and quality, and see the <a href="../../using-diffusers/loading#reuse-a-pipeline">reuse components across pipelines</a> section to learn how to efficiently load the same components into multiple pipelines.</p>',Ae,E,Pe,H,ut='Run the <a href="/docs/diffusers/pr_12595/en/api/pipelines/pixart#diffusers.PixArtAlphaPipeline">PixArtAlphaPipeline</a> with under 8GB GPU VRAM by loading the text encoder in 8-bit precision. Let’s walk through a full-fledged example.',Je,W,ft='First, install the <a href="https://github.com/TimDettmers/bitsandbytes" rel="nofollow">bitsandbytes</a> library:',je,R,Ie,V,gt="Then load the text encoder in 8-bit:",Ue,q,$e,Q,_t="Now, use the <code>pipe</code> to encode a prompt:",Ce,N,ke,S,bt="Since text embeddings have been computed, remove the <code>text_encoder</code> and <code>pipe</code> from the memory, and free up some GPU VRAM:",Ge,Y,Be,F,xt="Then compute the latents with the prompt embeddings as inputs:",Le,D,Ze,P,wt="<p>Notice that while initializing <code>pipe</code>, you’re setting <code>text_encoder</code> to <code>None</code> so that it’s not loaded.</p>",Xe,z,Mt="Once the latents are computed, pass it off to the VAE to decode into a real image:",Ee,O,He,K,Tt='By deleting components you aren’t using and flushing the GPU VRAM, you should be able to run <a href="/docs/diffusers/pr_12595/en/api/pipelines/pixart#diffusers.PixArtAlphaPipeline">PixArtAlphaPipeline</a> with under 8GB GPU VRAM.',We,ee,yt='<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/pixart/8bits_cat.png"/>',Re,te,vt='If you want a report of your memory-usage, run this <a href="https://gist.github.com/sayakpaul/3ae0f847001d342af27018a96f467e4e" rel="nofollow">script</a>.',Ve,J,At="<p>Text embeddings computed in 8-bit can impact the quality of the generated images because of the information loss in the representation space caused by the reduced precision. It’s recommended to compare the outputs with and without 8-bit.</p>",qe,ne,Pt="While loading the <code>text_encoder</code>, you set <code>load_in_8bit</code> to <code>True</code>. You could also specify <code>load_in_4bit</code> to bring your memory requirements down even further to under 7GB.",Qe,ie,Ne,_,oe,ze,pe,Jt="Pipeline for text-to-image generation using PixArt-Alpha.",Oe,de,jt=`This model inherits from <a href="/docs/diffusers/pr_12595/en/api/pipelines/overview#diffusers.DiffusionPipeline">DiffusionPipeline</a>. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)`,Ke,v,ae,et,ce,It="Function invoked when calling the pipeline for generation.",tt,j,nt,I,se,it,me,Ut="Encodes the prompt into text encoder hidden states.",Se,re,Ye,ue,Fe;return M=new Zt({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),T=new at({props:{title:"PixArt-α",local:"pixart-α",headingTag:"h1"}}),E=new at({props:{title:"Inference with under 8GB GPU VRAM",local:"inference-with-under-8gb-gpu-vram",headingTag:"h2"}}),R=new le({props:{code:"cGlwJTIwaW5zdGFsbCUyMC1VJTIwYml0c2FuZGJ5dGVz",highlighted:"pip install -U bitsandbytes",wrap:!1}}),q=new le({props:{code:"ZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMFQ1RW5jb2Rlck1vZGVsJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMFBpeEFydEFscGhhUGlwZWxpbmUlMEFpbXBvcnQlMjB0b3JjaCUwQSUwQXRleHRfZW5jb2RlciUyMCUzRCUyMFQ1RW5jb2Rlck1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJQaXhBcnQtYWxwaGElMkZQaXhBcnQtWEwtMi0xMDI0LU1TJTIyJTJDJTBBJTIwJTIwJTIwJTIwc3ViZm9sZGVyJTNEJTIydGV4dF9lbmNvZGVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwbG9hZF9pbl84Yml0JTNEVHJ1ZSUyQyUwQSUyMCUyMCUyMCUyMGRldmljZV9tYXAlM0QlMjJhdXRvJTIyJTJDJTBBJTBBKSUwQXBpcGUlMjAlM0QlMjBQaXhBcnRBbHBoYVBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJQaXhBcnQtYWxwaGElMkZQaXhBcnQtWEwtMi0xMDI0LU1TJTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNEdGV4dF9lbmNvZGVyJTJDJTBBJTIwJTIwJTIwJTIwdHJhbnNmb3JtZXIlM0ROb25lJTJDJTBBJTIwJTIwJTIwJTIwZGV2aWNlX21hcCUzRCUyMmF1dG8lMjIlMEEp",highlighted:`<span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> PixArtAlphaPipeline
<span class="hljs-keyword">import</span> torch
text_encoder = T5EncoderModel.from_pretrained(
<span class="hljs-string">&quot;PixArt-alpha/PixArt-XL-2-1024-MS&quot;</span>,
subfolder=<span class="hljs-string">&quot;text_encoder&quot;</span>,
load_in_8bit=<span class="hljs-literal">True</span>,
device_map=<span class="hljs-string">&quot;auto&quot;</span>,
)
pipe = PixArtAlphaPipeline.from_pretrained(
<span class="hljs-string">&quot;PixArt-alpha/PixArt-XL-2-1024-MS&quot;</span>,
text_encoder=text_encoder,
transformer=<span class="hljs-literal">None</span>,
device_map=<span class="hljs-string">&quot;auto&quot;</span>
)`,wrap:!1}}),N=new le({props:{code:"d2l0aCUyMHRvcmNoLm5vX2dyYWQoKSUzQSUwQSUyMCUyMCUyMCUyMHByb21wdCUyMCUzRCUyMCUyMmN1dGUlMjBjYXQlMjIlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTJDJTIwcHJvbXB0X2F0dGVudGlvbl9tYXNrJTJDJTIwbmVnYXRpdmVfZW1iZWRzJTJDJTIwbmVnYXRpdmVfcHJvbXB0X2F0dGVudGlvbl9tYXNrJTIwJTNEJTIwcGlwZS5lbmNvZGVfcHJvbXB0KHByb21wdCk=",highlighted:`<span class="hljs-keyword">with</span> torch.no_grad():
prompt = <span class="hljs-string">&quot;cute cat&quot;</span>
prompt_embeds, prompt_attention_mask, negative_embeds, negative_prompt_attention_mask = pipe.encode_prompt(prompt)`,wrap:!1}}),Y=new le({props:{code:"aW1wb3J0JTIwZ2MlMEElMEFkZWYlMjBmbHVzaCgpJTNBJTBBJTIwJTIwJTIwJTIwZ2MuY29sbGVjdCgpJTBBJTIwJTIwJTIwJTIwdG9yY2guY3VkYS5lbXB0eV9jYWNoZSgpJTBBJTBBZGVsJTIwdGV4dF9lbmNvZGVyJTBBZGVsJTIwcGlwZSUwQWZsdXNoKCk=",highlighted:`<span class="hljs-keyword">import</span> gc
<span class="hljs-keyword">def</span> <span class="hljs-title function_">flush</span>():
gc.collect()
torch.cuda.empty_cache()
<span class="hljs-keyword">del</span> text_encoder
<span class="hljs-keyword">del</span> pipe
flush()`,wrap:!1}}),D=new le({props:{code:"cGlwZSUyMCUzRCUyMFBpeEFydEFscGhhUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMlBpeEFydC1hbHBoYSUyRlBpeEFydC1YTC0yLTEwMjQtTVMlMjIlMkMlMEElMjAlMjAlMjAlMjB0ZXh0X2VuY29kZXIlM0ROb25lJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5mbG9hdDE2JTJDJTBBKS50byglMjJjdWRhJTIyKSUwQSUwQWxhdGVudHMlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRE5vbmUlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfZW1iZWRzJTNEcHJvbXB0X2VtYmVkcyUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdF9lbWJlZHMlM0RuZWdhdGl2ZV9lbWJlZHMlMkMlMEElMjAlMjAlMjAlMjBwcm9tcHRfYXR0ZW50aW9uX21hc2slM0Rwcm9tcHRfYXR0ZW50aW9uX21hc2slMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHRfYXR0ZW50aW9uX21hc2slM0RuZWdhdGl2ZV9wcm9tcHRfYXR0ZW50aW9uX21hc2slMkMlMEElMjAlMjAlMjAlMjBudW1faW1hZ2VzX3Blcl9wcm9tcHQlM0QxJTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJsYXRlbnQlMjIlMkMlMEEpLmltYWdlcyUwQSUwQWRlbCUyMHBpcGUudHJhbnNmb3JtZXIlMEFmbHVzaCgp",highlighted:`pipe = PixArtAlphaPipeline.from_pretrained(
<span class="hljs-string">&quot;PixArt-alpha/PixArt-XL-2-1024-MS&quot;</span>,
text_encoder=<span class="hljs-literal">None</span>,
torch_dtype=torch.float16,
).to(<span class="hljs-string">&quot;cuda&quot;</span>)
latents = pipe(
negative_prompt=<span class="hljs-literal">None</span>,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_embeds,
prompt_attention_mask=prompt_attention_mask,
negative_prompt_attention_mask=negative_prompt_attention_mask,
num_images_per_prompt=<span class="hljs-number">1</span>,
output_type=<span class="hljs-string">&quot;latent&quot;</span>,
).images
<span class="hljs-keyword">del</span> pipe.transformer
flush()`,wrap:!1}}),O=new le({props:{code:"d2l0aCUyMHRvcmNoLm5vX2dyYWQoKSUzQSUwQSUyMCUyMCUyMCUyMGltYWdlJTIwJTNEJTIwcGlwZS52YWUuZGVjb2RlKGxhdGVudHMlMjAlMkYlMjBwaXBlLnZhZS5jb25maWcuc2NhbGluZ19mYWN0b3IlMkMlMjByZXR1cm5fZGljdCUzREZhbHNlKSU1QjAlNUQlMEFpbWFnZSUyMCUzRCUyMHBpcGUuaW1hZ2VfcHJvY2Vzc29yLnBvc3Rwcm9jZXNzKGltYWdlJTJDJTIwb3V0cHV0X3R5cGUlM0QlMjJwaWwlMjIpJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyY2F0LnBuZyUyMik=",highlighted:`<span class="hljs-keyword">with</span> torch.no_grad():
image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=<span class="hljs-literal">False</span>)[<span class="hljs-number">0</span>]
image = pipe.image_processor.postprocess(image, output_type=<span class="hljs-string">&quot;pil&quot;</span>)[<span class="hljs-number">0</span>]
image.save(<span class="hljs-string">&quot;cat.png&quot;</span>)`,wrap:!1}}),ie=new at({props:{title:"PixArtAlphaPipeline",local:"diffusers.PixArtAlphaPipeline",headingTag:"h2"}}),oe=new ot({props:{name:"class diffusers.PixArtAlphaPipeline",anchor:"diffusers.PixArtAlphaPipeline",parameters:[{name:"tokenizer",val:": T5Tokenizer"},{name:"text_encoder",val:": T5EncoderModel"},{name:"vae",val:": AutoencoderKL"},{name:"transformer",val:": PixArtTransformer2DModel"},{name:"scheduler",val:": DPMSolverMultistepScheduler"}],parametersDescription:[{anchor:"diffusers.PixArtAlphaPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12595/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.PixArtAlphaPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) &#x2014;
Frozen text-encoder. PixArt-Alpha uses
<a href="https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically the
<a href="https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl" rel="nofollow">t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.PixArtAlphaPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5Tokenizer</code>) &#x2014;
Tokenizer of class
<a href="https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer" rel="nofollow">T5Tokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.PixArtAlphaPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12595/en/api/models/pixart_transformer2d#diffusers.PixArtTransformer2DModel">PixArtTransformer2DModel</a>) &#x2014;
A text conditioned <code>PixArtTransformer2DModel</code> to denoise the encoded image latents. Initially published as
<a href="https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS/blob/main/transformer/config.json#L2" rel="nofollow"><code>Transformer2DModel</code></a>
in the config, but the mismatch can be ignored.`,name:"transformer"},{anchor:"diffusers.PixArtAlphaPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12595/en/api/schedulers/overview#diffusers.SchedulerMixin">SchedulerMixin</a>) &#x2014;
A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py#L241"}}),ae=new ot({props:{name:"__call__",anchor:"diffusers.PixArtAlphaPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": str = ''"},{name:"num_inference_steps",val:": int = 20"},{name:"timesteps",val:": typing.List[int] = None"},{name:"sigmas",val:": typing.List[float] = None"},{name:"guidance_scale",val:": float = 4.5"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"eta",val:": float = 0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"callback",val:": typing.Optional[typing.Callable[[int, int, torch.Tensor], NoneType]] = None"},{name:"callback_steps",val:": int = 1"},{name:"clean_caption",val:": bool = True"},{name:"use_resolution_binning",val:": bool = True"},{name:"max_sequence_length",val:": int = 120"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.PixArtAlphaPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 100) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) &#x2014;
Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument
in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is
passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.5) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) &#x2014;
The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) &#x2014;
The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) &#x2014;
Corresponds to parameter eta (&#x3B7;) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only
applies to <a href="/docs/diffusers/pr_12595/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.prompt_attention_mask",description:"<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014; Pre-generated attention mask for text embeddings.",name:"prompt_attention_mask"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. For PixArt-Alpha this negative prompt should be &quot;&quot;. If not
provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.callback",description:`<strong>callback</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that will be called every <code>callback_steps</code> steps during inference. The function will be
called with the following arguments: <code>callback(step: int, timestep: int, latents: torch.Tensor)</code>.`,name:"callback"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.callback_steps",description:`<strong>callback_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The frequency at which the <code>callback</code> function will be called. If not specified, the callback will be
called at every step.`,name:"callback_steps"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.`,name:"clean_caption"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.use_resolution_binning",description:`<strong>use_resolution_binning</strong> (<code>bool</code> defaults to <code>True</code>) &#x2014;
If set to <code>True</code>, the requested height and width are first mapped to the closest resolutions using
<code>ASPECT_RATIO_1024_BIN</code>. After the produced latents are decoded into images, they are resized back to
the requested resolution. Useful for generating non-square images.`,name:"use_resolution_binning"},{anchor:"diffusers.PixArtAlphaPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 120) &#x2014; Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py#L686",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p>If <code>return_dict</code> is <code>True</code>, <a
href="/docs/diffusers/pr_12595/en/api/pipelines/ddim#diffusers.ImagePipelineOutput"
>ImagePipelineOutput</a> is returned, otherwise a <code>tuple</code> is
returned where the first element is a list with the generated images</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><a
href="/docs/diffusers/pr_12595/en/api/pipelines/ddim#diffusers.ImagePipelineOutput"
>ImagePipelineOutput</a> or <code>tuple</code></p>
`}}),j=new Xt({props:{anchor:"diffusers.PixArtAlphaPipeline.__call__.example",$$slots:{default:[Ht]},$$scope:{ctx:ge}}}),se=new ot({props:{name:"encode_prompt",anchor:"diffusers.PixArtAlphaPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"negative_prompt",val:": str = ''"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"},{name:"max_sequence_length",val:": int = 120"},{name:"**kwargs",val:""}],parametersDescription:[{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt not to guide the image generation. If not defined, one has to pass <code>negative_prompt_embeds</code>
instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>). For
PixArt-Alpha, this should be &quot;&quot;.`,name:"negative_prompt"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>, <em>optional</em>):
torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. For PixArt-Alpha, it&#x2019;s should be the embeddings of the &quot;&quot;
string.`,name:"negative_prompt_embeds"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, defaults to <code>False</code>) &#x2014;
If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"},{anchor:"diffusers.PixArtAlphaPipeline.encode_prompt.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code>, defaults to 120) &#x2014; Maximum sequence length to use for the prompt.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12595/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py#L303"}}),re=new Et({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/pixart.md"}}),{c(){d=s("meta"),$=o(),y=s("p"),w=o(),c(M.$$.fragment),p=o(),c(T.$$.fragment),_e=o(),C=s("p"),C.innerHTML=st,be=o(),k=s("p"),k.innerHTML=rt,xe=o(),G=s("p"),G.textContent=lt,we=o(),B=s("p"),B.innerHTML=pt,Me=o(),L=s("p"),L.innerHTML=dt,Te=o(),Z=s("p"),Z.textContent=ct,ye=o(),X=s("ul"),X.innerHTML=mt,ve=o(),A=s("blockquote"),A.innerHTML=ht,Ae=o(),c(E.$$.fragment),Pe=o(),H=s("p"),H.innerHTML=ut,Je=o(),W=s("p"),W.innerHTML=ft,je=o(),c(R.$$.fragment),Ie=o(),V=s("p"),V.textContent=gt,Ue=o(),c(q.$$.fragment),$e=o(),Q=s("p"),Q.innerHTML=_t,Ce=o(),c(N.$$.fragment),ke=o(),S=s("p"),S.innerHTML=bt,Ge=o(),c(Y.$$.fragment),Be=o(),F=s("p"),F.textContent=xt,Le=o(),c(D.$$.fragment),Ze=o(),P=s("blockquote"),P.innerHTML=wt,Xe=o(),z=s("p"),z.textContent=Mt,Ee=o(),c(O.$$.fragment),He=o(),K=s("p"),K.innerHTML=Tt,We=o(),ee=s("p"),ee.innerHTML=yt,Re=o(),te=s("p"),te.innerHTML=vt,Ve=o(),J=s("blockquote"),J.innerHTML=At,qe=o(),ne=s("p"),ne.innerHTML=Pt,Qe=o(),c(ie.$$.fragment),Ne=o(),_=s("div"),c(oe.$$.fragment),ze=o(),pe=s("p"),pe.textContent=Jt,Oe=o(),de=s("p"),de.innerHTML=jt,Ke=o(),v=s("div"),c(ae.$$.fragment),et=o(),ce=s("p"),ce.textContent=It,tt=o(),c(j.$$.fragment),nt=o(),I=s("div"),c(se.$$.fragment),it=o(),me=s("p"),me.textContent=Ut,Se=o(),c(re.$$.fragment),Ye=o(),ue=s("p"),this.h()},l(e){const t=Lt("svelte-u9bgzb",document.head);d=r(t,"META",{name:!0,content:!0}),t.forEach(n),$=a(e),y=r(e,"P",{}),fe(y).forEach(n),w=a(e),m(M.$$.fragment,e),p=a(e),m(T.$$.fragment,e),_e=a(e),C=r(e,"P",{"data-svelte-h":!0}),l(C)!=="svelte-v8u40f"&&(C.innerHTML=st),be=a(e),k=r(e,"P",{"data-svelte-h":!0}),l(k)!=="svelte-9ej743"&&(k.innerHTML=rt),xe=a(e),G=r(e,"P",{"data-svelte-h":!0}),l(G)!=="svelte-1cwsb16"&&(G.textContent=lt),we=a(e),B=r(e,"P",{"data-svelte-h":!0}),l(B)!=="svelte-1ddr7a0"&&(B.innerHTML=pt),Me=a(e),L=r(e,"P",{"data-svelte-h":!0}),l(L)!=="svelte-1c7kadv"&&(L.innerHTML=dt),Te=a(e),Z=r(e,"P",{"data-svelte-h":!0}),l(Z)!=="svelte-v1sn1g"&&(Z.textContent=ct),ye=a(e),X=r(e,"UL",{"data-svelte-h":!0}),l(X)!=="svelte-1o0jwg8"&&(X.innerHTML=mt),ve=a(e),A=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),l(A)!=="svelte-r1jcqf"&&(A.innerHTML=ht),Ae=a(e),m(E.$$.fragment,e),Pe=a(e),H=r(e,"P",{"data-svelte-h":!0}),l(H)!=="svelte-1ey7m9r"&&(H.innerHTML=ut),Je=a(e),W=r(e,"P",{"data-svelte-h":!0}),l(W)!=="svelte-c2h3fb"&&(W.innerHTML=ft),je=a(e),m(R.$$.fragment,e),Ie=a(e),V=r(e,"P",{"data-svelte-h":!0}),l(V)!=="svelte-5r7318"&&(V.textContent=gt),Ue=a(e),m(q.$$.fragment,e),$e=a(e),Q=r(e,"P",{"data-svelte-h":!0}),l(Q)!=="svelte-rj1jgt"&&(Q.innerHTML=_t),Ce=a(e),m(N.$$.fragment,e),ke=a(e),S=r(e,"P",{"data-svelte-h":!0}),l(S)!=="svelte-1ux3xow"&&(S.innerHTML=bt),Ge=a(e),m(Y.$$.fragment,e),Be=a(e),F=r(e,"P",{"data-svelte-h":!0}),l(F)!=="svelte-w3d4de"&&(F.textContent=xt),Le=a(e),m(D.$$.fragment,e),Ze=a(e),P=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),l(P)!=="svelte-1389ba9"&&(P.innerHTML=wt),Xe=a(e),z=r(e,"P",{"data-svelte-h":!0}),l(z)!=="svelte-1yf7wfg"&&(z.textContent=Mt),Ee=a(e),m(O.$$.fragment,e),He=a(e),K=r(e,"P",{"data-svelte-h":!0}),l(K)!=="svelte-1o505iu"&&(K.innerHTML=Tt),We=a(e),ee=r(e,"P",{"data-svelte-h":!0}),l(ee)!=="svelte-1g55ccf"&&(ee.innerHTML=yt),Re=a(e),te=r(e,"P",{"data-svelte-h":!0}),l(te)!=="svelte-11lvu0m"&&(te.innerHTML=vt),Ve=a(e),J=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),l(J)!=="svelte-16px8fj"&&(J.innerHTML=At),qe=a(e),ne=r(e,"P",{"data-svelte-h":!0}),l(ne)!=="svelte-1i82z60"&&(ne.innerHTML=Pt),Qe=a(e),m(ie.$$.fragment,e),Ne=a(e),_=r(e,"DIV",{class:!0});var x=fe(_);m(oe.$$.fragment,x),ze=a(x),pe=r(x,"P",{"data-svelte-h":!0}),l(pe)!=="svelte-31ka93"&&(pe.textContent=Jt),Oe=a(x),de=r(x,"P",{"data-svelte-h":!0}),l(de)!=="svelte-1mudibp"&&(de.innerHTML=jt),Ke=a(x),v=r(x,"DIV",{class:!0});var he=fe(v);m(ae.$$.fragment,he),et=a(he),ce=r(he,"P",{"data-svelte-h":!0}),l(ce)!=="svelte-v78lg8"&&(ce.textContent=It),tt=a(he),m(j.$$.fragment,he),he.forEach(n),nt=a(x),I=r(x,"DIV",{class:!0});var De=fe(I);m(se.$$.fragment,De),it=a(De),me=r(De,"P",{"data-svelte-h":!0}),l(me)!=="svelte-16q0ax1"&&(me.textContent=Ut),De.forEach(n),x.forEach(n),Se=a(e),m(re.$$.fragment,e),Ye=a(e),ue=r(e,"P",{}),fe(ue).forEach(n),this.h()},h(){U(d,"name","hf:doc:metadata"),U(d,"content",Rt),U(A,"class","tip"),U(P,"class","tip"),U(J,"class","warning"),U(v,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),U(_,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){b(document.head,d),i(e,$,t),i(e,y,t),i(e,w,t),h(M,e,t),i(e,p,t),h(T,e,t),i(e,_e,t),i(e,C,t),i(e,be,t),i(e,k,t),i(e,xe,t),i(e,G,t),i(e,we,t),i(e,B,t),i(e,Me,t),i(e,L,t),i(e,Te,t),i(e,Z,t),i(e,ye,t),i(e,X,t),i(e,ve,t),i(e,A,t),i(e,Ae,t),h(E,e,t),i(e,Pe,t),i(e,H,t),i(e,Je,t),i(e,W,t),i(e,je,t),h(R,e,t),i(e,Ie,t),i(e,V,t),i(e,Ue,t),h(q,e,t),i(e,$e,t),i(e,Q,t),i(e,Ce,t),h(N,e,t),i(e,ke,t),i(e,S,t),i(e,Ge,t),h(Y,e,t),i(e,Be,t),i(e,F,t),i(e,Le,t),h(D,e,t),i(e,Ze,t),i(e,P,t),i(e,Xe,t),i(e,z,t),i(e,Ee,t),h(O,e,t),i(e,He,t),i(e,K,t),i(e,We,t),i(e,ee,t),i(e,Re,t),i(e,te,t),i(e,Ve,t),i(e,J,t),i(e,qe,t),i(e,ne,t),i(e,Qe,t),h(ie,e,t),i(e,Ne,t),i(e,_,t),h(oe,_,null),b(_,ze),b(_,pe),b(_,Oe),b(_,de),b(_,Ke),b(_,v),h(ae,v,null),b(v,et),b(v,ce),b(v,tt),h(j,v,null),b(_,nt),b(_,I),h(se,I,null),b(I,it),b(I,me),i(e,Se,t),h(re,e,t),i(e,Ye,t),i(e,ue,t),Fe=!0},p(e,[t]){const x={};t&2&&(x.$$scope={dirty:t,ctx:e}),j.$set(x)},i(e){Fe||(u(M.$$.fragment,e),u(T.$$.fragment,e),u(E.$$.fragment,e),u(R.$$.fragment,e),u(q.$$.fragment,e),u(N.$$.fragment,e),u(Y.$$.fragment,e),u(D.$$.fragment,e),u(O.$$.fragment,e),u(ie.$$.fragment,e),u(oe.$$.fragment,e),u(ae.$$.fragment,e),u(j.$$.fragment,e),u(se.$$.fragment,e),u(re.$$.fragment,e),Fe=!0)},o(e){f(M.$$.fragment,e),f(T.$$.fragment,e),f(E.$$.fragment,e),f(R.$$.fragment,e),f(q.$$.fragment,e),f(N.$$.fragment,e),f(Y.$$.fragment,e),f(D.$$.fragment,e),f(O.$$.fragment,e),f(ie.$$.fragment,e),f(oe.$$.fragment,e),f(ae.$$.fragment,e),f(j.$$.fragment,e),f(se.$$.fragment,e),f(re.$$.fragment,e),Fe=!1},d(e){e&&(n($),n(y),n(w),n(p),n(_e),n(C),n(be),n(k),n(xe),n(G),n(we),n(B),n(Me),n(L),n(Te),n(Z),n(ye),n(X),n(ve),n(A),n(Ae),n(Pe),n(H),n(Je),n(W),n(je),n(Ie),n(V),n(Ue),n($e),n(Q),n(Ce),n(ke),n(S),n(Ge),n(Be),n(F),n(Le),n(Ze),n(P),n(Xe),n(z),n(Ee),n(He),n(K),n(We),n(ee),n(Re),n(te),n(Ve),n(J),n(qe),n(ne),n(Qe),n(Ne),n(_),n(Se),n(Ye),n(ue)),n(d),g(M,e),g(T,e),g(E,e),g(R,e),g(q,e),g(N,e),g(Y,e),g(D,e),g(O,e),g(ie,e),g(oe),g(ae),g(j),g(se),g(re,e)}}}const Rt='{"title":"PixArt-α","local":"pixart-α","sections":[{"title":"Inference with under 8GB GPU VRAM","local":"inference-with-under-8gb-gpu-vram","sections":[],"depth":2},{"title":"PixArtAlphaPipeline","local":"diffusers.PixArtAlphaPipeline","sections":[],"depth":2}],"depth":1}';function Vt(ge){return Ct(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class zt extends Gt{constructor(d){super(),Bt(this,d,Vt,Wt,$t,{})}}export{zt as component};

Xet Storage Details

Size:
39.6 kB
·
Xet hash:
e9cc5581bb43462bcbf9d3d579b5be6ca1b8c582c071b4027f77cb97e05ce56a

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.