# HunyuanImage2.1

HunyuanImage-2.1 is a 17B-parameter text-to-image model capable of generating 2K (2048 x 2048) resolution images.

HunyuanImage-2.1 comes in the following variants:

| model type | model id |
| :---: | :---: |
| HunyuanImage-2.1 | [hunyuanvideo-community/HunyuanImage-2.1-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Diffusers) |
| HunyuanImage-2.1-Distilled | [hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers) |
| HunyuanImage-2.1-Refiner | [hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers) |

> [!TIP]
> [Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs.
## HunyuanImage-2.1

HunyuanImage-2.1 applies [Adaptive Projected Guidance (APG)](https://huggingface.co/papers/2410.02416) combined with Classifier-Free Guidance (CFG) in the denoising loop. `HunyuanImagePipeline` has a `guider` component (read more about [Guider](../modular_diffusers/guiders)) and does not take a `guidance_scale` parameter at runtime. To change guider-related parameters such as `guidance_scale`, update the `guider` configuration instead.

```py
import torch
from diffusers import HunyuanImagePipeline

pipe = HunyuanImagePipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanImage-2.1-Diffusers",
    torch_dtype=torch.bfloat16,
)
pipe = pipe.to("cuda")
```
You can inspect the `guider` object:

```py
>>> pipe.guider
AdaptiveProjectedMixGuidance {
  "_class_name": "AdaptiveProjectedMixGuidance",
  "_diffusers_version": "0.36.0.dev0",
  "adaptive_projected_guidance_momentum": -0.5,
  "adaptive_projected_guidance_rescale": 10.0,
  "adaptive_projected_guidance_scale": 10.0,
  "adaptive_projected_guidance_start_step": 5,
  "enabled": true,
  "eta": 0.0,
  "guidance_rescale": 0.0,
  "guidance_scale": 3.5,
  "start": 0.0,
  "stop": 1.0,
  "use_original_formulation": false
}
State:
  step: None
  num_inference_steps: None
  timestep: None
  count_prepared: 0
  enabled: True
  num_conditions: 2
  momentum_buffer: None
  is_apg_enabled: False
  is_cfg_enabled: True
```

To update the guider with a different configuration, use the `new()` method. For example, to generate an image with `guidance_scale=5.0` while keeping all other default guidance parameters:
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> HunyuanImagePipeline | |
| pipe = HunyuanImagePipeline.from_pretrained( | |
| <span class="hljs-string">"hunyuanvideo-community/HunyuanImage-2.1-Diffusers"</span>, | |
| torch_dtype=torch.bfloat16 | |
| ) | |
| pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-comment"># Update the guider configuration</span> | |
| pipe.guider = pipe.guider.new(guidance_scale=<span class="hljs-number">5.0</span>) | |
| prompt = ( | |
| <span class="hljs-string">"A cute, cartoon-style anthropomorphic penguin plush toy with fluffy fur, standing in a painting studio, "</span> | |
| <span class="hljs-string">"wearing a red knitted scarf and a red beret with the word 'Tencent' on it, holding a paintbrush with a "</span> | |
| <span class="hljs-string">"focused expression as it paints an oil painting of the Mona Lisa, rendered in a photorealistic photographic style."</span> | |
| ) | |
| image = pipe( | |
| prompt=prompt, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| height=<span class="hljs-number">2048</span>, | |
| width=<span class="hljs-number">2048</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"image.png"</span>)`,wrap:!1}}),Q=new fe({props:{title:"HunyuanImage-2.1-Distilled",local:"hunyuanimage-21-distilled",headingTag:"h2"}}),S=new _e({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwSHVueXVhbkltYWdlUGlwZWxpbmUlMEFwaXBlJTIwJTNEJTIwSHVueXVhbkltYWdlUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMmh1bnl1YW52aWRlby1jb21tdW5pdHklMkZIdW55dWFuSW1hZ2UtMi4xLURpc3RpbGxlZC1EaWZmdXNlcnMlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGUlMjAlM0QlMjBwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwKCUwQSUyMCUyMCUyMCUyMCUyMkElMjBjdXRlJTJDJTIwY2FydG9vbi1zdHlsZSUyMGFudGhyb3BvbW9ycGhpYyUyMHBlbmd1aW4lMjBwbHVzaCUyMHRveSUyMHdpdGglMjBmbHVmZnklMjBmdXIlMkMlMjBzdGFuZGluZyUyMGluJTIwYSUyMHBhaW50aW5nJTIwc3R1ZGlvJTJDJTIwJTIyJTBBJTIwJTIwJTIwJTIwJTIyd2VhcmluZyUyMGElMjByZWQlMjBrbml0dGVkJTIwc2NhcmYlMjBhbmQlMjBhJTIwcmVkJTIwYmVyZXQlMjB3aXRoJTIwdGhlJTIwd29yZCUyMCdUZW5jZW50JyUyMG9uJTIwaXQlMkMlMjBob2xkaW5nJTIwYSUyMHBhaW50YnJ1c2glMjB3aXRoJTIwYSUyMCUyMiUwQSUyMCUyMCUyMCUyMCUyMmZvY3VzZWQlMjBleHByZXNzaW9uJTIwYXMlMjBpdCUyMHBhaW50cyUyMGFuJTIwb2lsJTIwcGFpbnRpbmclMjBvZiUyMHRoZSUyME1vbmElMjBMaXNhJTJDJTIwcmVuZGVyZWQlMjBpbiUyMGElMjBwaG90b3JlYWxpc3RpYyUyMHBob3RvZ3JhcGhpYyUyMHN0eWxlLiUyMiUwQSklMEElMEFvdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q4JTJDJTBBJTIwJTIwJTIwJTIwZGlzdGlsbGVkX2d1aWRhbmNlX3NjYWxlJTNEMy4yNSUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDIwNDglMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDIwNDglMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0RnZW5lcmF0b3IlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEE=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> HunyuanImagePipeline | |
| pipe = HunyuanImagePipeline.from_pretrained(<span class="hljs-string">"hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers"</span>, torch_dtype=torch.bfloat16) | |
| pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| prompt = ( | |
| <span class="hljs-string">"A cute, cartoon-style anthropomorphic penguin plush toy with fluffy fur, standing in a painting studio, "</span> | |
| <span class="hljs-string">"wearing a red knitted scarf and a red beret with the word 'Tencent' on it, holding a paintbrush with a "</span> | |
| <span class="hljs-string">"focused expression as it paints an oil painting of the Mona Lisa, rendered in a photorealistic photographic style."</span> | |
| ) | |
| out = pipe( | |
| prompt, | |
| num_inference_steps=<span class="hljs-number">8</span>, | |
| distilled_guidance_scale=<span class="hljs-number">3.25</span>, | |
| height=<span class="hljs-number">2048</span>, | |
| width=<span class="hljs-number">2048</span>, | |
| generator=generator, | |
| ).images[<span class="hljs-number">0</span>] | |
| `,wrap:!1}}),Y=new fe({props:{title:"HunyuanImagePipeline",local:"diffusers.HunyuanImagePipeline",headingTag:"h2"}}),D=new oe({props:{name:"class diffusers.HunyuanImagePipeline",anchor:"diffusers.HunyuanImagePipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLHunyuanImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": ByT5Tokenizer"},{name:"transformer",val:": HunyuanImageTransformer2DModel"},{name:"guider",val:": typing.Optional[diffusers.guiders.adaptive_projected_guidance_mix.AdaptiveProjectedMixGuidance] = None"},{name:"ocr_guider",val:": typing.Optional[diffusers.guiders.adaptive_projected_guidance_mix.AdaptiveProjectedMixGuidance] = None"}],parametersDescription:[{anchor:"diffusers.HunyuanImagePipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12249/en/api/models/hunyuanimage_transformer_2d#diffusers.HunyuanImageTransformer2DModel">HunyuanImageTransformer2DModel</a>) — | |
## HunyuanImagePipeline

**class diffusers.HunyuanImagePipeline**(scheduler, vae, text_encoder, tokenizer, text_encoder_2, tokenizer_2, transformer, guider=None, ocr_guider=None)

The HunyuanImage pipeline for text-to-image generation.

**Parameters:**

- **transformer** ([HunyuanImageTransformer2DModel](/docs/diffusers/pr_12249/en/api/models/hunyuanimage_transformer_2d#diffusers.HunyuanImageTransformer2DModel)) — Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
- **scheduler** ([FlowMatchEulerDiscreteScheduler](/docs/diffusers/pr_12249/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- **vae** ([AutoencoderKLHunyuanImage](/docs/diffusers/pr_12249/en/api/models/autoencoder_kl_hunyuanimage#diffusers.AutoencoderKLHunyuanImage)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`Qwen2_5_VLForConditionalGeneration`) — [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct).
- **tokenizer** (`Qwen2Tokenizer`) — Tokenizer of class [Qwen2Tokenizer].
- **text_encoder_2** (`T5EncoderModel`) — the [T5EncoderModel](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel) variant.
- **tokenizer_2** (`ByT5Tokenizer`) — Tokenizer of class [ByT5Tokenizer].
- **guider** (`AdaptiveProjectedMixGuidance`, *optional*) — [AdaptiveProjectedMixGuidance] used to guide the image generation.
- **ocr_guider** (`AdaptiveProjectedMixGuidance`, *optional*) — [AdaptiveProjectedMixGuidance] used to guide the image generation when text rendering is needed.

**\_\_call\_\_**

Function invoked when calling the pipeline for generation.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined and `negative_prompt_embeds` is not provided, an empty negative prompt is used. Ignored when not using guidance.
- **height** (`int`, *optional*) — The height in pixels of the generated image. This is set to 1024 by default for the best results.
- **width** (`int`, *optional*) — The width in pixels of the generated image. This is set to 1024 by default for the best results.
- **num_inference_steps** (`int`, *optional*, defaults to 50) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used.
- **distilled_guidance_scale** (`float`, *optional*, defaults to 3.25) — A guidance scale value for guidance-distilled models. Unlike traditional classifier-free guidance, where the guidance scale is applied during inference through noise prediction rescaling, guidance-distilled models take the guidance scale directly as an input parameter during the forward pass. Guidance is enabled by setting `distilled_guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, usually at the expense of lower image quality. This parameter is required for guidance-distilled models and ignored for non-distilled models.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **prompt_embeds_mask** (`torch.Tensor`, *optional*) — Pre-generated text embeddings mask. If not provided, it will be generated from the `prompt` input argument.
- **prompt_embeds_2** (`torch.Tensor`, *optional*) — Pre-generated text embeddings for OCR. If not provided, they will be generated from the `prompt` input argument.
- **prompt_embeds_mask_2** (`torch.Tensor`, *optional*) — Pre-generated text embeddings mask for OCR. If not provided, it will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, they will be generated from the `negative_prompt` input argument.
- **negative_prompt_embeds_mask** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings mask. If not provided, it will be generated from the `negative_prompt` input argument.
- **negative_prompt_embeds_2** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings for OCR. If not provided, they will be generated from the `negative_prompt` input argument.
- **negative_prompt_embeds_mask_2** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings mask for OCR. If not provided, it will be generated from the `negative_prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `~pipelines.hunyuan_image.HunyuanImagePipelineOutput` instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*) — A function called at the end of each denoising step during inference with the arguments `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include the tensors specified by `callback_on_step_end_tensor_inputs`; see the sketch after this list.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
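A minimal sketch of a step-end callback, following the signature described above; `pipe` and `prompt` are assumed to be defined as in the earlier examples:

```py
def on_step_end(pipeline, step, timestep, callback_kwargs):
    # callback_kwargs holds the tensors requested via
    # callback_on_step_end_tensor_inputs (here only "latents").
    latents = callback_kwargs["latents"]
    print(f"step {step} (t={timestep}): latent std = {latents.std().item():.4f}")
    return callback_kwargs  # return the (possibly modified) kwargs

image = pipe(
    prompt,
    callback_on_step_end=on_step_end,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```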
**Returns:** `~pipelines.hunyuan_image.HunyuanImagePipelineOutput` or `tuple` — a `HunyuanImagePipelineOutput` if `return_dict` is `True`, otherwise a `tuple` whose first element is a list with the generated images.

**Example:**

```py
>>> import torch
>>> from diffusers import HunyuanImagePipeline

>>> pipe = HunyuanImagePipeline.from_pretrained(
...     "hunyuanvideo-community/HunyuanImage-2.1-Diffusers", torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")
>>> prompt = "A cat holding a sign that says hello world"
>>> # Depending on the variant being used, the pipeline call will slightly vary.
>>> # Refer to the pipeline documentation for more details.
>>> image = pipe(prompt, negative_prompt="", num_inference_steps=50).images[0]
>>> image.save("hunyuanimage.png")
```

**encode_prompt**(prompt, device=None, batch_size=1, num_images_per_prompt=1, prompt_embeds=None, prompt_embeds_mask=None, prompt_embeds_2=None, prompt_embeds_mask_2=None)

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — prompt to be encoded
- **device** (`torch.device`, *optional*) — torch device
- **batch_size** (`int`) — batch size of prompts, defaults to 1
- **num_images_per_prompt** (`int`) — number of images that should be generated per prompt
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. If not provided, text embeddings will be generated from the `prompt` input argument.
- **prompt_embeds_mask** (`torch.Tensor`, *optional*) — Pre-generated text mask. If not provided, the text mask will be generated from the `prompt` input argument.
- **prompt_embeds_2** (`torch.Tensor`, *optional*) — Pre-generated glyph text embeddings from ByT5. If not provided, they will be generated from the `prompt` input argument using `self.tokenizer_2` and `self.text_encoder_2`.
- **prompt_embeds_mask_2** (`torch.Tensor`, *optional*) — Pre-generated glyph text mask from ByT5. If not provided, it will be generated from the `prompt` input argument using `self.tokenizer_2` and `self.text_encoder_2`.
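As an illustrative, non-authoritative sketch, the embeddings can be precomputed once and reused across calls. The assumption here is that `encode_prompt` returns the embeddings and masks in the order of the parameters above; verify against the pipeline source before relying on it. `pipe` is assumed to be loaded as in the earlier examples.

```py
import torch

# Hypothetical usage: precompute embeddings, then reuse them across several seeds.
embeds, embeds_mask, embeds_2, embeds_mask_2 = pipe.encode_prompt(  # assumed return order
    prompt="A cat holding a sign that says hello world",
    device="cuda",
)

for seed in (0, 1, 2):
    image = pipe(
        prompt_embeds=embeds,
        prompt_embeds_mask=embeds_mask,
        prompt_embeds_2=embeds_2,
        prompt_embeds_mask_2=embeds_mask_2,
        generator=torch.Generator(device="cuda").manual_seed(seed),
        num_inference_steps=50,
    ).images[0]
    image.save(f"hunyuanimage_{seed}.png")
```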
## HunyuanImageRefinerPipeline

**class diffusers.HunyuanImageRefinerPipeline**(scheduler, vae, text_encoder, tokenizer, transformer, guider=None)

The HunyuanImage refiner pipeline for text-to-image generation.

**Parameters:**

- **transformer** ([HunyuanImageTransformer2DModel](/docs/diffusers/pr_12249/en/api/models/hunyuanimage_transformer_2d#diffusers.HunyuanImageTransformer2DModel)) — Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
- **scheduler** ([FlowMatchEulerDiscreteScheduler](/docs/diffusers/pr_12249/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- **vae** ([AutoencoderKLHunyuanImageRefiner](/docs/diffusers/pr_12249/en/api/models/autoencoder_kl_hunyuanimage_refiner#diffusers.AutoencoderKLHunyuanImageRefiner)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`Qwen2_5_VLForConditionalGeneration`) — [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct).
- **tokenizer** (`Qwen2Tokenizer`) — Tokenizer of class [Qwen2Tokenizer].
- **guider** (`AdaptiveProjectedMixGuidance`, *optional*) — [AdaptiveProjectedMixGuidance] used to guide the image generation.

**\_\_call\_\_**

Function invoked when calling the pipeline for generation.

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- **negative_prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts not to guide the image generation. If not defined, an empty negative prompt is used. Ignored when not using guidance.
- **distilled_guidance_scale** (`float`, *optional*, defaults to 3.25) — A guidance scale value for guidance-distilled models. Unlike traditional classifier-free guidance, where the guidance scale is applied during inference through noise prediction rescaling, guidance-distilled models take the guidance scale directly as an input parameter during the forward pass. Guidance is enabled by setting `distilled_guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, usually at the expense of lower image quality. This parameter is required for guidance-distilled models and ignored for non-distilled models.
- **image** (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, or a list of these, *optional*) — The image(s) to refine.
- **height** (`int`, *optional*) — The height in pixels of the generated image. This is set to 1024 by default for the best results.
- **width** (`int`, *optional*) — The width in pixels of the generated image. This is set to 1024 by default for the best results.
- **num_inference_steps** (`int`, *optional*, defaults to 4) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.Tensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **negative_prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated negative text embeddings. If not provided, they will be generated from the `negative_prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `~pipelines.hunyuan_image.HunyuanImagePipelineOutput` instead of a plain tuple.
- **attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*) — A function called at the end of each denoising step during inference with the arguments `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include the tensors specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
**Returns:** `~pipelines.hunyuan_image.HunyuanImagePipelineOutput` or `tuple` — a `HunyuanImagePipelineOutput` if `return_dict` is `True`, otherwise a `tuple` whose first element is a list with the generated images.

**Example:**

```py
>>> import torch
>>> from diffusers import HunyuanImageRefinerPipeline
>>> from diffusers.utils import load_image

>>> pipe = HunyuanImageRefinerPipeline.from_pretrained(
...     "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers", torch_dtype=torch.bfloat16
... )
>>> pipe.to("cuda")
>>> prompt = "A cat holding a sign that says hello world"
>>> image = load_image("path/to/image.png")
>>> # Depending on the variant being used, the pipeline call will slightly vary.
>>> # Refer to the pipeline documentation for more details.
>>> image = pipe(prompt, image=image, num_inference_steps=4).images[0]
>>> image.save("hunyuanimage.png")
```

**encode_prompt**(prompt=None, device=None, batch_size=1, num_images_per_prompt=1, prompt_embeds=None, prompt_embeds_mask=None)

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — prompt to be encoded
- **device** (`torch.device`, *optional*) — torch device
- **batch_size** (`int`) — batch size of prompts, defaults to 1
- **num_images_per_prompt** (`int`) — number of images that should be generated per prompt
- **prompt_embeds** (`torch.Tensor`, *optional*) — Pre-generated text embeddings. If not provided, text embeddings will be generated from the `prompt` input argument.
- **prompt_embeds_mask** (`torch.Tensor`, *optional*) — Pre-generated text mask. If not provided, the text mask will be generated from the `prompt` input argument.
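Putting the two pipelines together, here is a hedged end-to-end sketch, assuming (as in the refiner example above) that the refiner accepts the base pipeline's PIL output directly:

```py
import torch
from diffusers import HunyuanImagePipeline, HunyuanImageRefinerPipeline

# With limited VRAM, move the pipelines to the GPU one at a time instead.
base = HunyuanImagePipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanImage-2.1-Diffusers", torch_dtype=torch.bfloat16
).to("cuda")
refiner = HunyuanImageRefinerPipeline.from_pretrained(
    "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers", torch_dtype=torch.bfloat16
).to("cuda")

prompt = "A cat holding a sign that says hello world"

# Generate with the base model, then refine with a few extra steps.
image = base(prompt, negative_prompt="", num_inference_steps=50).images[0]
refined = refiner(prompt, image=image, num_inference_steps=4).images[0]
refined.save("hunyuanimage_refined.png")
```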
## HunyuanImagePipelineOutput

**class diffusers.pipelines.hunyuan_image.pipeline_output.HunyuanImagePipelineOutput**(images)

Output class for HunyuanImage pipelines.

**Parameters:**

- **images** (`List[PIL.Image.Image]` or `np.ndarray`) — List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height, width, num_channels)`, representing the denoised images of the diffusion pipeline.
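A short sketch of the two access patterns implied by `return_dict`, reusing `pipe` and `prompt` from the examples above:

```py
out = pipe(prompt)       # HunyuanImagePipelineOutput by default
image = out.images[0]    # .images is a list of PIL images

images = pipe(prompt, return_dict=False)[0]  # plain tuple: first element is the image list
image = images[0]
```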