Buckets:
| import{s as Ji,o as Qi,n as zn}from"../chunks/scheduler.53228c21.js";import{S as ki,i as Ui,e as i,s as t,c as d,h as ji,a as r,d as s,b as a,f as y,g as p,j as w,k as b,l as n,m as h,n as c,t as m,o as g,p as u}from"../chunks/index.100fac89.js";import{D as I}from"../chunks/Docstring.98d3e518.js";import{C as A}from"../chunks/CodeBlock.d30a6509.js";import{E as Dn}from"../chunks/ExampleCodeBlock.6f4ee49e.js";import{H as B,E as $i}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.afa087fa.js";function Pi(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwUXdlbkltYWdlUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlF3ZW4lMkZRd2VuLUltYWdlJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMGNhdCUyMGhvbGRpbmclMjBhJTIwc2lnbiUyMHRoYXQlMjBzYXlzJTIwaGVsbG8lMjB3b3JsZCUyMiUwQSUyMyUyMERlcGVuZGluZyUyMG9uJTIwdGhlJTIwdmFyaWFudCUyMGJlaW5nJTIwdXNlZCUyQyUyMHRoZSUyMHBpcGVsaW5lJTIwY2FsbCUyMHdpbGwlMjBzbGlnaHRseSUyMHZhcnkuJTBBJTIzJTIwUmVmZXIlMjB0byUyMHRoZSUyMHBpcGVsaW5lJTIwZG9jdW1lbnRhdGlvbiUyMGZvciUyMG1vcmUlMjBkZXRhaWxzLiUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlMkMlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTApLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMnF3ZW5pbWFnZS5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImagePipeline | |
| <span class="hljs-meta">>>> </span>pipe = QwenImagePipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Ci(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlSW1nMkltZ1BpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwUXdlbkltYWdlSW1nMkltZ1BpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJRd2VuJTJGUXdlbi1JbWFnZSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZSUyMCUzRCUyMHBpcGUudG8oJTIyY3VkYSUyMiklMEF1cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGc3RhYmxlLWRpZmZ1c2lvbiUyRm1haW4lMkZhc3NldHMlMkZzdGFibGUtc2FtcGxlcyUyRmltZzJpbWclMkZza2V0Y2gtbW91bnRhaW5zLWlucHV0LmpwZyUyMiUwQWluaXRfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKHVybCkucmVzaXplKCgxMDI0JTJDJTIwMTAyNCkpJTBBcHJvbXB0JTIwJTNEJTIwJTIyY2F0JTIwd2l6YXJkJTJDJTIwZ2FuZGFsZiUyQyUyMGxvcmQlMjBvZiUyMHRoZSUyMHJpbmdzJTJDJTIwZGV0YWlsZWQlMkMlMjBmYW50YXN5JTJDJTIwY3V0ZSUyQyUyMGFkb3JhYmxlJTJDJTIwUGl4YXIlMkMlMjBEaXNuZXklMjIlMEFpbWFnZXMlMjAlM0QlMjBwaXBlKHByb21wdCUzRHByb21wdCUyQyUyMG5lZ2F0aXZlX3Byb21wdCUzRCUyMiUyMCUyMiUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMHN0cmVuZ3RoJTNEMC45NSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlcy5zYXZlKCUyMnF3ZW5pbWFnZV9pbWcyaW1nLnBuZyUyMik=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageImg2ImgPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageImg2ImgPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| <span class="hljs-meta">>>> </span>init_image = load_image(url).resize((<span class="hljs-number">1024</span>, <span class="hljs-number">1024</span>)) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney"</span> | |
| <span class="hljs-meta">>>> </span>images = pipe(prompt=prompt, negative_prompt=<span class="hljs-string">" "</span>, image=init_image, strength=<span class="hljs-number">0.95</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>images.save(<span class="hljs-string">"qwenimage_img2img.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Zi(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlSW5wYWludFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwUXdlbkltYWdlSW5wYWludFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJRd2VuJTJGUXdlbi1JbWFnZSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQXByb21wdCUyMCUzRCUyMCUyMkZhY2UlMjBvZiUyMGElMjB5ZWxsb3clMjBjYXQlMkMlMjBoaWdoJTIwcmVzb2x1dGlvbiUyQyUyMHNpdHRpbmclMjBvbiUyMGElMjBwYXJrJTIwYmVuY2glMjIlMEFpbWdfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW8ucG5nJTIyJTBBbWFza191cmwlMjAlM0QlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGbGF0ZW50LWRpZmZ1c2lvbiUyRm1haW4lMkZkYXRhJTJGaW5wYWludGluZ19leGFtcGxlcyUyRm92ZXJ0dXJlLWNyZWF0aW9ucy01c0k2ZlFnWUl1b19tYXNrLnBuZyUyMiUwQXNvdXJjZSUyMCUzRCUyMGxvYWRfaW1hZ2UoaW1nX3VybCklMEFtYXNrJTIwJTNEJTIwbG9hZF9pbWFnZShtYXNrX3VybCklMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0JTNEcHJvbXB0JTJDJTIwbmVnYXRpdmVfcHJvbXB0JTNEJTIyJTIwJTIyJTJDJTIwaW1hZ2UlM0Rzb3VyY2UlMkMlMjBtYXNrX2ltYWdlJTNEbWFzayUyQyUyMHN0cmVuZ3RoJTNEMC44NSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIycXdlbmltYWdlX2lucGFpbnRpbmcucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageInpaintPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Face of a yellow cat, high resolution, sitting on a park bench"</span> | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">>>> </span>mask_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>source = load_image(img_url) | |
| <span class="hljs-meta">>>> </span>mask = load_image(mask_url) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt=prompt, negative_prompt=<span class="hljs-string">" "</span>, image=source, mask_image=mask, strength=<span class="hljs-number">0.85</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_inpainting.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Wi(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwUElMJTIwaW1wb3J0JTIwSW1hZ2UlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlRWRpdFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwUXdlbkltYWdlRWRpdFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJRd2VuJTJGUXdlbi1JbWFnZS1FZGl0JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRmRpZmZ1c2VycyUyRnlhcm4tYXJ0LXBpa2FjaHUucG5nJTIyJTBBKS5jb252ZXJ0KCUyMlJHQiUyMiklMEFwcm9tcHQlMjAlM0QlMjAoJTBBJTIwJTIwJTIwJTIwJTIyTWFrZSUyMFBpa2FjaHUlMjBob2xkJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCdRd2VuJTIwRWRpdCUyMGlzJTIwYXdlc29tZSclMkMlMjB5YXJuJTIwYXJ0JTIwc3R5bGUlMkMlMjBkZXRhaWxlZCUyQyUyMHZpYnJhbnQlMjBjb2xvcnMlMjIlMEEpJTBBJTIzJTIwRGVwZW5kaW5nJTIwb24lMjB0aGUlMjB2YXJpYW50JTIwYmVpbmclMjB1c2VkJTJDJTIwdGhlJTIwcGlwZWxpbmUlMjBjYWxsJTIwd2lsbCUyMHNsaWdodGx5JTIwdmFyeS4lMEElMjMlMjBSZWZlciUyMHRvJTIwdGhlJTIwcGlwZWxpbmUlMjBkb2N1bWVudGF0aW9uJTIwZm9yJTIwbW9yZSUyMGRldGFpbHMuJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKGltYWdlJTJDJTIwcHJvbXB0JTJDJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJxd2VuaW1hZ2VfZWRpdC5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageEditPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageEditPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image-Edit"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"</span> | |
| <span class="hljs-meta">... </span>).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = ( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Make Pikachu hold a sign that says 'Qwen Edit is awesome', yarn art style, detailed, vibrant colors"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe(image, prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_edit.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Ei(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwUElMJTIwaW1wb3J0JTIwSW1hZ2UlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlRWRpdElucGFpbnRQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFF3ZW5JbWFnZUVkaXRJbnBhaW50UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlF3ZW4lMkZRd2VuLUltYWdlLUVkaXQlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEFwcm9tcHQlMjAlM0QlMjAlMjJGYWNlJTIwb2YlMjBhJTIweWVsbG93JTIwY2F0JTJDJTIwaGlnaCUyMHJlc29sdXRpb24lMkMlMjBzaXR0aW5nJTIwb24lMjBhJTIwcGFyayUyMGJlbmNoJTIyJTBBJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZsYXRlbnQtZGlmZnVzaW9uJTJGbWFpbiUyRmRhdGElMkZpbnBhaW50aW5nX2V4YW1wbGVzJTJGb3ZlcnR1cmUtY3JlYXRpb25zLTVzSTZmUWdZSXVvLnBuZyUyMiUwQW1hc2tfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW9fbWFzay5wbmclMjIlMEFzb3VyY2UlMjAlM0QlMjBsb2FkX2ltYWdlKGltZ191cmwpJTBBbWFzayUyMCUzRCUyMGxvYWRfaW1hZ2UobWFza191cmwpJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUyMG5lZ2F0aXZlX3Byb21wdCUzRCUyMiUyMCUyMiUyQyUyMGltYWdlJTNEc291cmNlJTJDJTIwbWFza19pbWFnZSUzRG1hc2slMkMlMjBzdHJlbmd0aCUzRDEuMCUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCUwQSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIycXdlbmltYWdlX2lucGFpbnRpbmcucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> 
torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageEditInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageEditInpaintPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image-Edit"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Face of a yellow cat, high resolution, sitting on a park bench"</span> | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">>>> </span>mask_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>source = load_image(img_url) | |
| <span class="hljs-meta">>>> </span>mask = load_image(mask_url) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, negative_prompt=<span class="hljs-string">" "</span>, image=source, mask_image=mask, strength=<span class="hljs-number">1.0</span>, num_inference_steps=<span class="hljs-number">50</span> | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_inpainting.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Li(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBRd2VuSW1hZ2VDb250cm9sTmV0TW9kZWwlMkMlMjBRd2VuSW1hZ2VNdWx0aUNvbnRyb2xOZXRNb2RlbCUyQyUyMFF3ZW5JbWFnZUNvbnRyb2xOZXRQaXBlbGluZSUwQSUwQSUyMyUyMFF3ZW5JbWFnZUNvbnRyb2xOZXRNb2RlbCUwQWNvbnRyb2xuZXQlMjAlM0QlMjBRd2VuSW1hZ2VDb250cm9sTmV0TW9kZWwuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMkluc3RhbnRYJTJGUXdlbi1JbWFnZS1Db250cm9sTmV0LVVuaW9uJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSklMEFwaXBlJTIwJTNEJTIwUXdlbkltYWdlQ29udHJvbE5ldFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJRd2VuJTJGUXdlbi1JbWFnZSUyMiUyQyUyMGNvbnRyb2xuZXQlM0Rjb250cm9sbmV0JTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBcHJvbXB0JTIwJTNEJTIwJTIyQWVzdGhldGljcyUyMGFydCUyQyUyMHRyYWRpdGlvbmFsJTIwYXNpYW4lMjBwYWdvZGElMkMlMjBlbGFib3JhdGUlMjBnb2xkZW4lMjBhY2NlbnRzJTJDJTIwc2t5JTIwYmx1ZSUyMGFuZCUyMHdoaXRlJTIwY29sb3IlMjBwYWxldHRlJTJDJTIwc3dpcmxpbmclMjBjbG91ZCUyMHBhdHRlcm4lMkMlMjBkaWdpdGFsJTIwaWxsdXN0cmF0aW9uJTJDJTIwZWFzdCUyMGFzaWFuJTIwYXJjaGl0ZWN0dXJlJTJDJTIwb3JuYW1lbnRhbCUyMHJvb2Z0b3AlMkMlMjBpbnRyaWNhdGUlMjBkZXRhaWxpbmclMjBvbiUyMGJ1aWxkaW5nJTJDJTIwY3VsdHVyYWwlMjByZXByZXNlbnRhdGlvbi4lMjIlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjIlMjAlMjIlMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGSW5zdGFudFglMkZRd2VuLUltYWdlLUNvbnRyb2xOZXQtVW5pb24lMkZyZXNvbHZlJTJGbWFpbiUyRmNvbmRzJTJGY2FubnkucG5nJTIyJTBBKSUwQSUyMyUyMERlcG
VuZGluZyUyMG9uJTIwdGhlJTIwdmFyaWFudCUyMGJlaW5nJTIwdXNlZCUyQyUyMHRoZSUyMHBpcGVsaW5lJTIwY2FsbCUyMHdpbGwlMjBzbGlnaHRseSUyMHZhcnkuJTBBJTIzJTIwUmVmZXIlMjB0byUyMHRoZSUyMHBpcGVsaW5lJTIwZG9jdW1lbnRhdGlvbiUyMGZvciUyMG1vcmUlMjBkZXRhaWxzLiUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sX2ltYWdlJTNEY29udHJvbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXRfY29uZGl0aW9uaW5nX3NjYWxlJTNEMS4wJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwdHJ1ZV9jZmdfc2NhbGUlM0Q0LjAlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMnF3ZW5pbWFnZV9jbl91bmlvbi5wbmclMjIpJTBBJTBBJTIzJTIwUXdlbkltYWdlTXVsdGlDb250cm9sTmV0TW9kZWwlMEFjb250cm9sbmV0JTIwJTNEJTIwUXdlbkltYWdlQ29udHJvbE5ldE1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJJbnN0YW50WCUyRlF3ZW4tSW1hZ2UtQ29udHJvbE5ldC1VbmlvbiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpJTBBY29udHJvbG5ldCUyMCUzRCUyMFF3ZW5JbWFnZU11bHRpQ29udHJvbE5ldE1vZGVsKCU1QmNvbnRyb2xuZXQlNUQpJTBBcGlwZSUyMCUzRCUyMFF3ZW5JbWFnZUNvbnRyb2xOZXRQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyUXdlbiUyRlF3ZW4tSW1hZ2UlMjIlMkMlMjBjb250cm9sbmV0JTNEY29udHJvbG5ldCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQXByb21wdCUyMCUzRCUyMCUyMkFlc3RoZXRpY3MlMjBhcnQlMkMlMjB0cmFkaXRpb25hbCUyMGFzaWFuJTIwcGFnb2RhJTJDJTIwZWxhYm9yYXRlJTIwZ29sZGVuJTIwYWNjZW50cyUyQyUyMHNreSUyMGJsdWUlMjBhbmQlMjB3aGl0ZSUyMGNvbG9yJTIwcGFsZXR0ZSUyQyUyMHN3aXJsaW5nJTIwY2xvdWQlMjBwYXR0ZXJuJTJDJTIwZGlnaXRhbCUyMGlsbHVzdHJhdGlvbiUyQyUyMGVhc3QlMjBhc2lhbiUyMGFyY2hpdGVjdHVyZSUyQyUyMG9ybmFtZW50YWwlMjByb29mdG9wJTJDJTIwaW50cmljYXRlJTIwZGV0YWlsaW5nJTIwb24lMjBidWlsZGluZyUyQyUyMGN1bHR1cmFsJTIwcmVwcmVzZW50YXRpb24uJTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIyJTIwJTIyJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRkluc3RhbnRYJTJGUXdlbi1JbWFnZS1Db250cm9sTmV0LVVuaW9uJTJGcmVzb2x2ZSUyRm1haW4lMkZjb25kcy
UyRmNhbm55LnBuZyUyMiUwQSklMEElMjMlMjBEZXBlbmRpbmclMjBvbiUyMHRoZSUyMHZhcmlhbnQlMjBiZWluZyUyMHVzZWQlMkMlMjB0aGUlMjBwaXBlbGluZSUyMGNhbGwlMjB3aWxsJTIwc2xpZ2h0bHklMjB2YXJ5LiUwQSUyMyUyMFJlZmVyJTIwdG8lMjB0aGUlMjBwaXBlbGluZSUyMGRvY3VtZW50YXRpb24lMjBmb3IlMjBtb3JlJTIwZGV0YWlscy4lMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwbmVnYXRpdmVfcHJvbXB0JTNEbmVnYXRpdmVfcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbF9pbWFnZSUzRCU1QmNvbnRyb2xfaW1hZ2UlMkMlMjBjb250cm9sX2ltYWdlJTVEJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldF9jb25kaXRpb25pbmdfc2NhbGUlM0QlNUIwLjUlMkMlMjAwLjUlNUQlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMzAlMkMlMEElMjAlMjAlMjAlMjB0cnVlX2NmZ19zY2FsZSUzRDQuMCUyQyUwQSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIycXdlbmltYWdlX2NuX3VuaW9uX211bHRpLnBuZyUyMik=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageControlNetModel, QwenImageMultiControlNetModel, QwenImageControlNetPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># QwenImageControlNetModel</span> | |
| <span class="hljs-meta">>>> </span>controlnet = QwenImageControlNetModel.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"InstantX/Qwen-Image-ControlNet-Union"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageControlNetPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Qwen/Qwen-Image"</span>, controlnet=controlnet, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation."</span> | |
| <span class="hljs-meta">>>> </span>negative_prompt = <span class="hljs-string">" "</span> | |
| <span class="hljs-meta">>>> </span>control_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt, | |
| <span class="hljs-meta">... </span> negative_prompt=negative_prompt, | |
| <span class="hljs-meta">... </span> control_image=control_image, | |
| <span class="hljs-meta">... </span> controlnet_conditioning_scale=<span class="hljs-number">1.0</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">30</span>, | |
| <span class="hljs-meta">... </span> true_cfg_scale=<span class="hljs-number">4.0</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_cn_union.png"</span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># QwenImageMultiControlNetModel</span> | |
| <span class="hljs-meta">>>> </span>controlnet = QwenImageControlNetModel.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"InstantX/Qwen-Image-ControlNet-Union"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>controlnet = QwenImageMultiControlNetModel([controlnet]) | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageControlNetPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Qwen/Qwen-Image"</span>, controlnet=controlnet, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Aesthetics art, traditional asian pagoda, elaborate golden accents, sky blue and white color palette, swirling cloud pattern, digital illustration, east asian architecture, ornamental rooftop, intricate detailing on building, cultural representation."</span> | |
| <span class="hljs-meta">>>> </span>negative_prompt = <span class="hljs-string">" "</span> | |
| <span class="hljs-meta">>>> </span>control_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/InstantX/Qwen-Image-ControlNet-Union/resolve/main/conds/canny.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt, | |
| <span class="hljs-meta">... </span> negative_prompt=negative_prompt, | |
| <span class="hljs-meta">... </span> control_image=[control_image, control_image], | |
| <span class="hljs-meta">... </span> controlnet_conditioning_scale=[<span class="hljs-number">0.5</span>, <span class="hljs-number">0.5</span>], | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">30</span>, | |
| <span class="hljs-meta">... </span> true_cfg_scale=<span class="hljs-number">4.0</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_cn_union_multi.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Ni(C){let f,x="Examples:",M,_,v;return _=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwUElMJTIwaW1wb3J0JTIwSW1hZ2UlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlRWRpdFBsdXNQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFF3ZW5JbWFnZUVkaXRQbHVzUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMlF3ZW4lMkZRd2VuLUltYWdlLUVkaXQtMjUwOSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ5YXJuLWFydC1waWthY2h1LnBuZyUyMiUwQSkuY29udmVydCglMjJSR0IlMjIpJTBBcHJvbXB0JTIwJTNEJTIwKCUwQSUyMCUyMCUyMCUyMCUyMk1ha2UlMjBQaWthY2h1JTIwaG9sZCUyMGElMjBzaWduJTIwdGhhdCUyMHNheXMlMjAnUXdlbiUyMEVkaXQlMjBpcyUyMGF3ZXNvbWUnJTJDJTIweWFybiUyMGFydCUyMHN0eWxlJTJDJTIwZGV0YWlsZWQlMkMlMjB2aWJyYW50JTIwY29sb3JzJTIyJTBBKSUwQSUyMyUyMERlcGVuZGluZyUyMG9uJTIwdGhlJTIwdmFyaWFudCUyMGJlaW5nJTIwdXNlZCUyQyUyMHRoZSUyMHBpcGVsaW5lJTIwY2FsbCUyMHdpbGwlMjBzbGlnaHRseSUyMHZhcnkuJTBBJTIzJTIwUmVmZXIlMjB0byUyMHRoZSUyMHBpcGVsaW5lJTIwZG9jdW1lbnRhdGlvbiUyMGZvciUyMG1vcmUlMjBkZXRhaWxzLiUwQWltYWdlJTIwJTNEJTIwcGlwZShpbWFnZSUyQyUyMHByb21wdCUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIycXdlbmltYWdlX2VkaXRfcGx1cy5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageEditPlusPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = QwenImageEditPlusPipeline.from_pretrained(<span class="hljs-string">"Qwen/Qwen-Image-Edit-2509"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"</span> | |
| <span class="hljs-meta">... </span>).convert(<span class="hljs-string">"RGB"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = ( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"Make Pikachu hold a sign that says 'Qwen Edit is awesome', yarn art style, detailed, vibrant colors"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe(image, prompt, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"qwenimage_edit_plus.png"</span>)`,wrap:!1}}),{c(){f=i("p"),f.textContent=x,M=t(),d(_.$$.fragment)},l(o){f=r(o,"P",{"data-svelte-h":!0}),w(f)!=="svelte-kvfsh7"&&(f.textContent=x),M=a(o),p(_.$$.fragment,o)},m(o,T){h(o,f,T),h(o,M,T),c(_,o,T),v=!0},p:zn,i(o){v||(m(_.$$.fragment,o),v=!0)},o(o){g(_.$$.fragment,o),v=!1},d(o){o&&(s(f),s(M)),u(_,o)}}}function Gi(C){let f,x,M,_,v,o,T,xo='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>',At,$e,Jo="Qwen-Image from the Qwen team is an image generation foundation model in the Qwen series that achieves significant advances in complex text rendering and precise image editing. Experiments show strong general capabilities in both image generation and editing, with exceptional performance in text rendering, especially for Chinese.",St,Pe,Qo="Qwen-Image comes in the following variants:",Ot,Ce,ko='<thead><tr><th align="center">model type</th> <th align="center">model id</th></tr></thead> <tbody><tr><td align="center">Qwen-Image</td> <td align="center"><a href="https://huggingface.co/Qwen/Qwen-Image" rel="nofollow"><code>Qwen/Qwen-Image</code></a></td></tr> <tr><td align="center">Qwen-Image-Edit</td> <td align="center"><a href="https://huggingface.co/Qwen/Qwen-Image-Edit" rel="nofollow"><code>Qwen/Qwen-Image-Edit</code></a></td></tr> <tr><td align="center">Qwen-Image-Edit Plus</td> <td align="center"><a href="https://huggingface.co/Qwen/Qwen-Image-Edit-2509" rel="nofollow">Qwen/Qwen-Image-Edit-2509</a></td></tr></tbody>',Kt,Ze,Uo="<p>[!TIP][Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs.</p>",ea,We,na,Ee,jo=`Use a LoRA from <code>lightx2v/Qwen-Image-Lightning</code> to speed up inference by reducing the | |
| number of steps. Refer to the code snippet below:`,ta,S,An,$o="Code",Ya,Le,aa,O,Po="<p>The <code>guidance_scale</code> parameter in the pipeline is there to support future guidance-distilled models when they come up. Note that passing <code>guidance_scale</code> to the pipeline is ineffective. To enable classifier-free guidance, please pass <code>true_cfg_scale</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ”) should enable classifier-free guidance computations.</p>",sa,Ne,oa,Ge,Co='With <a href="/docs/diffusers/pr_12448/en/api/pipelines/qwenimage#diffusers.QwenImageEditPlusPipeline">QwenImageEditPlusPipeline</a>, one can provide multiple images as input reference.',ia,Be,ra,Ve,la,J,qe,Da,Sn,Zo="The QwenImage pipeline for text-to-image generation.",za,V,Re,Aa,On,Wo="Function invoked when calling the pipeline for generation.",Sa,K,Oa,ee,Xe,Ka,Kn,Eo=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,es,ne,Fe,ns,et,Lo=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ts,te,He,as,nt,No=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,ss,ae,Ye,os,tt,Go=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,is,at,De,da,ze,pa,Q,Ae,rs,st,Bo="The QwenImage pipeline for text-to-image generation.",ls,q,Se,ds,ot,Vo="Function invoked when calling the pipeline for generation.",ps,se,cs,oe,Oe,ms,it,qo=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,gs,ie,Ke,us,rt,Ro=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,fs,re,en,hs,lt,Xo=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,_s,le,nn,ws,dt,Fo=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,bs,pt,tn,ca,an,ma,k,sn,ys,ct,Ho="The QwenImage pipeline for text-to-image generation.",Is,R,on,vs,mt,Yo="Function invoked when calling the pipeline for generation.",Ms,de,Ts,pe,rn,xs,gt,Do=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Js,ce,ln,Qs,ut,zo=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ks,me,dn,Us,ft,Ao=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,js,ge,pn,$s,ht,So=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,Ps,_t,cn,ga,mn,ua,U,gn,Cs,wt,Oo="The Qwen-Image-Edit pipeline for image editing.",Zs,X,un,Ws,bt,Ko="Function invoked when calling the pipeline for generation.",Es,ue,Ls,fe,fn,Ns,yt,ei=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Gs,he,hn,Bs,It,ni=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Vs,_e,_n,qs,vt,ti=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Rs,we,wn,Xs,Mt,ai=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,Fs,Tt,bn,fa,yn,ha,j,In,Hs,xt,si="The Qwen-Image-Edit pipeline for image editing.",Ys,F,vn,Ds,Jt,oi="Function invoked when calling the pipeline for generation.",zs,be,As,ye,Mn,Ss,Qt,ii=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Os,Ie,Tn,Ks,kt,ri=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,eo,ve,xn,no,Ut,li=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,to,Me,Jn,ao,jt,di=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,so,$t,Qn,_a,kn,wa,$,Un,oo,Pt,pi="The QwenImage pipeline for text-to-image generation.",io,H,jn,ro,Ct,ci="Function invoked when calling the pipeline for generation.",lo,Te,po,xe,$n,co,Zt,mi=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,mo,Je,Pn,go,Wt,gi=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,uo,Qe,Cn,fo,Et,ui=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,ho,ke,Zn,_o,Lt,fi=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,wo,Nt,Wn,ba,En,ya,G,Ln,bo,Gt,hi="The Qwen-Image-Edit pipeline for image editing.",yo,Y,Nn,Io,Bt,_i="Function invoked when calling the pipeline for generation.",vo,Ue,Mo,Vt,Gn,Ia,Bn,va,D,Vn,To,qt,wi="Output class for Stable Diffusion pipelines.",Ma,qn,Ta,zt,xa;return v=new B({props:{title:"QwenImage",local:"qwenimage",headingTag:"h1"}}),We=new B({props:{title:"LoRA for faster inference",local:"lora-for-faster-inference",headingTag:"h2"}}),Le=new A({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMERpZmZ1c2lvblBpcGVsaW5lJTJDJTIwRmxvd01hdGNoRXVsZXJEaXNjcmV0ZVNjaGVkdWxlciUwQWltcG9ydCUyMHRvcmNoJTIwJTBBaW1wb3J0JTIwbWF0aCUwQSUwQWNrcHRfaWQlMjAlM0QlMjAlMjJRd2VuJTJGUXdlbi1JbWFnZSUyMiUwQSUwQSUyMyUyMEZyb20lMEElMjMlMjBodHRwcyUzQSUyRiUyRmdpdGh1Yi5jb20lMkZNb2RlbFRDJTJGUXdlbi1JbWFnZS1MaWdodG5pbmclMkZibG9iJTJGMzQyMjYwZThmNTQ2OGQyZjI0ZDA4NGNlMDRmNTVlMTAxMDA3MTE4YiUyRmdlbmVyYXRlX3dpdGhfZGlmZnVzZXJzLnB5JTIzTDgyQzktTDk3QzEwJTBBc2NoZWR1bGVyX2NvbmZpZyUyMCUzRCUyMCU3QiUwQSUyMCUyMCUyMCUyMCUyMmJhc2VfaW1hZ2Vfc2VxX2xlbiUyMiUzQSUyMDI1NiUyQyUwQSUyMCUyMCUyMCUyMCUyMmJhc2Vfc2hpZnQlMjIlM0ElMjBtYXRoLmxvZygzKSUyQyUyMCUyMCUyMyUyMFdlJTIwdXNlJTIwc2hpZnQlM0QzJTIwaW4lMjBkaXN0aWxsYXRpb24lMEElMjAlMjAlMjAlMjAlMjJpbnZlcnRfc2lnbWFzJTIyJTNBJTIwRmFsc2UlMkMlMEElMjAlMjAlMjAlMjAlMjJtYXhfaW1hZ2Vfc2VxX2xlbiUyMiUzQSUyMDgxOTIlMkMlMEElMjAlMjAlMjAlMjAlMjJtYXhfc2hpZnQlMjIlM0ElMjBtYXRoLmxvZygzKSUyQyUyMCUyMCUyMyUyMFdlJTIwdXNlJTIwc2hpZnQlM0QzJTIwaW4lMjBkaXN0aWxsYXRpb24lMEElMjAlMjAlMjAlMjAlMjJudW1fdHJhaW5fdGltZXN0ZXBzJTIyJTNBJTIwMTAwMCUyQyUwQSUyMCUyMCUyMCUyMCUyMnNoaWZ0JTIyJTNBJTIwMS4wJTJDJTBBJTIwJTIwJTIwJTIwJTIyc2hpZnRfdGVybWluYWwlMjIlM0ElMjBOb25lJTJDJTIwJTIwJTIzJTIwc2V0JTIwc2hpZnRfdGVybWluYWwlMjB0byUyME5vbmUlMEElMjAlMjAlMjAlMjAlMjJzdG9jaGFzdGljX3NhbXBsaW5nJTIyJTNBJTIwRmFsc2UlMkMlMEElMjAlMjAlMjAlMjAlMjJ0aW1lX3NoaWZ0X3R5cGUlMjIlM0ElMjAlMjJleHBvbmVudGlhbCUyMiUyQyUwQSUyMCUyMCUyMCUyMCUyMnVzZV9iZXRhX3NpZ21hcyUyMiUzQSUyMEZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwJTIydXNlX2R5bmFtaWNfc2hpZnRpbmclMjIlM0ElMjBUcnVlJTJDJT
BBJTIwJTIwJTIwJTIwJTIydXNlX2V4cG9uZW50aWFsX3NpZ21hcyUyMiUzQSUyMEZhbHNlJTJDJTBBJTIwJTIwJTIwJTIwJTIydXNlX2thcnJhc19zaWdtYXMlMjIlM0ElMjBGYWxzZSUyQyUwQSU3RCUwQXNjaGVkdWxlciUyMCUzRCUyMEZsb3dNYXRjaEV1bGVyRGlzY3JldGVTY2hlZHVsZXIuZnJvbV9jb25maWcoc2NoZWR1bGVyX2NvbmZpZyklMEFwaXBlJTIwJTNEJTIwRGlmZnVzaW9uUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMGNrcHRfaWQlMkMlMjBzY2hlZHVsZXIlM0RzY2hlZHVsZXIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKS50byglMjJjdWRhJTIyKSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTBBJTIwJTIwJTIwJTIwJTIybGlnaHR4MnYlMkZRd2VuLUltYWdlLUxpZ2h0bmluZyUyMiUyQyUyMHdlaWdodF9uYW1lJTNEJTIyUXdlbi1JbWFnZS1MaWdodG5pbmctOHN0ZXBzLVYxLjAuc2FmZXRlbnNvcnMlMjIlMEEpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYSUyMHRpbnklMjBhc3Ryb25hdXQlMjBoYXRjaGluZyUyMGZyb20lMjBhbiUyMGVnZyUyMG9uJTIwdGhlJTIwbW9vbiUyQyUyMFVsdHJhJTIwSEQlMkMlMjA0SyUyQyUyMGNpbmVtYXRpYyUyMGNvbXBvc2l0aW9uLiUyMiUwQW5lZ2F0aXZlX3Byb21wdCUyMCUzRCUyMCUyMiUyMCUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDglMkMlMEElMjAlMjAlMjAlMjB0cnVlX2NmZ19zY2FsZSUzRDEuMCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLm1hbnVhbF9zZWVkKDApJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJxd2VuX2Zld3N0ZXBzLnBuZyUyMik=",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> DiffusionPipeline, FlowMatchEulerDiscreteScheduler | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">import</span> math | |
| ckpt_id = <span class="hljs-string">"Qwen/Qwen-Image"</span> | |
| <span class="hljs-comment"># From</span> | |
| <span class="hljs-comment"># https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10</span> | |
| scheduler_config = { | |
| <span class="hljs-string">"base_image_seq_len"</span>: <span class="hljs-number">256</span>, | |
| <span class="hljs-string">"base_shift"</span>: math.log(<span class="hljs-number">3</span>), <span class="hljs-comment"># We use shift=3 in distillation</span> | |
| <span class="hljs-string">"invert_sigmas"</span>: <span class="hljs-literal">False</span>, | |
| <span class="hljs-string">"max_image_seq_len"</span>: <span class="hljs-number">8192</span>, | |
| <span class="hljs-string">"max_shift"</span>: math.log(<span class="hljs-number">3</span>), <span class="hljs-comment"># We use shift=3 in distillation</span> | |
| <span class="hljs-string">"num_train_timesteps"</span>: <span class="hljs-number">1000</span>, | |
| <span class="hljs-string">"shift"</span>: <span class="hljs-number">1.0</span>, | |
| <span class="hljs-string">"shift_terminal"</span>: <span class="hljs-literal">None</span>, <span class="hljs-comment"># set shift_terminal to None</span> | |
| <span class="hljs-string">"stochastic_sampling"</span>: <span class="hljs-literal">False</span>, | |
| <span class="hljs-string">"time_shift_type"</span>: <span class="hljs-string">"exponential"</span>, | |
| <span class="hljs-string">"use_beta_sigmas"</span>: <span class="hljs-literal">False</span>, | |
| <span class="hljs-string">"use_dynamic_shifting"</span>: <span class="hljs-literal">True</span>, | |
| <span class="hljs-string">"use_exponential_sigmas"</span>: <span class="hljs-literal">False</span>, | |
| <span class="hljs-string">"use_karras_sigmas"</span>: <span class="hljs-literal">False</span>, | |
| } | |
| scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config) | |
| pipe = DiffusionPipeline.from_pretrained( | |
| ckpt_id, scheduler=scheduler, torch_dtype=torch.bfloat16 | |
| ).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_lora_weights( | |
| <span class="hljs-string">"lightx2v/Qwen-Image-Lightning"</span>, weight_name=<span class="hljs-string">"Qwen-Image-Lightning-8steps-V1.0.safetensors"</span> | |
| ) | |
| prompt = <span class="hljs-string">"a tiny astronaut hatching from an egg on the moon, Ultra HD, 4K, cinematic composition."</span> | |
| negative_prompt = <span class="hljs-string">" "</span> | |
| image = pipe( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| width=<span class="hljs-number">1024</span>, | |
| height=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">8</span>, | |
| true_cfg_scale=<span class="hljs-number">1.0</span>, | |
| generator=torch.manual_seed(<span class="hljs-number">0</span>), | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"qwen_fewsteps.png"</span>)`,wrap:!1}}),Ne=new B({props:{title:"Multi-image reference with QwenImageEditPlusPipeline",local:"multi-image-reference-with-qwenimageeditpluspipeline",headingTag:"h2"}}),Be=new A({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwUElMJTIwaW1wb3J0JTIwSW1hZ2UlMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwUXdlbkltYWdlRWRpdFBsdXNQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFF3ZW5JbWFnZUVkaXRQbHVzUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMlF3ZW4lMkZRd2VuLUltYWdlLUVkaXQtMjUwOSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpLnRvKCUyMmN1ZGElMjIpJTBBJTBBaW1hZ2VfMSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGZGlmZnVzZXJzJTJGZ3J1bXB5LmpwZyUyMiklMEFpbWFnZV8yJTIwJTNEJTIwbG9hZF9pbWFnZSglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZwZW5nLnBuZyUyMiklMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0QlNUJpbWFnZV8xJTJDJTIwaW1hZ2VfMiU1RCUyQyUyMCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRCcnJ3B1dCUyMHRoZSUyMHBlbmd1aW4lMjBhbmQlMjB0aGUlMjBjYXQlMjBhdCUyMGElMjBnYW1lJTIwc2hvdyUyMGNhbGxlZCUyMCUyMlF3ZW4lMjBFZGl0JTIwUGx1cyUyMEdhbWVzJTIyJycnJTJDJTIwJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwJTBBKS5pbWFnZXMlNUIwJTVE",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> PIL <span class="hljs-keyword">import</span> Image | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> QwenImageEditPlusPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| pipe = QwenImageEditPlusPipeline.from_pretrained( | |
| <span class="hljs-string">"Qwen/Qwen-Image-Edit-2509"</span>, torch_dtype=torch.bfloat16 | |
| ).to(<span class="hljs-string">"cuda"</span>) | |
| image_1 = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/grumpy.jpg"</span>) | |
| image_2 = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/peng.png"</span>) | |
| image = pipe( | |
| image=[image_1, image_2], | |
| prompt=<span class="hljs-string">'''put the penguin and the cat at a game show called "Qwen Edit Plus Games"'''</span>, | |
| num_inference_steps=<span class="hljs-number">50</span> | |
| ).images[<span class="hljs-number">0</span>]`,wrap:!1}}),Ve=new B({props:{title:"QwenImagePipeline",local:"diffusers.QwenImagePipeline",headingTag:"h2"}}),qe=new I({props:{name:"class diffusers.QwenImagePipeline",anchor:"diffusers.QwenImagePipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImagePipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImagePipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImagePipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImagePipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImagePipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L132"}}),Re=new I({props:{name:"__call__",anchor:"diffusers.QwenImagePipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.QwenImagePipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImagePipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImagePipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is enabled by | |
| setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale encourages to | |
| generate images that are closely linked to the text <code>prompt</code>, usually at the expense of lower image | |
| quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImagePipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImagePipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImagePipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImagePipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImagePipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImagePipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImagePipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImagePipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImagePipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImagePipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImagePipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImagePipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImagePipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImagePipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImagePipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImagePipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L451",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),K=new Dn({props:{anchor:"diffusers.QwenImagePipeline.__call__.example",$$slots:{default:[Pi]},$$scope:{ctx:C}}}),Xe=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImagePipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L359"}}),Fe=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImagePipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L386"}}),He=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImagePipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L346"}}),Ye=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImagePipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L372"}}),De=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImagePipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImagePipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImagePipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImagePipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImagePipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py#L226"}}),ze=new B({props:{title:"QwenImageImg2ImgPipeline",local:"diffusers.QwenImageImg2ImgPipeline",headingTag:"h2"}}),Ae=new I({props:{name:"class diffusers.QwenImageImg2ImgPipeline",anchor:"diffusers.QwenImageImg2ImgPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImageImg2ImgPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageImg2ImgPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L134"}}),Se=new I({props:{name:"__call__",anchor:"diffusers.QwenImageImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": 
int = 512"}],parametersDescription:[{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is enabled by | |
| setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale encourages to | |
| generate images that are closely linked to the text <code>prompt</code>, usually at the expense of lower image | |
| quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L525",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),se=new Dn({props:{anchor:"diffusers.QwenImageImg2ImgPipeline.__call__.example",$$slots:{default:[Ci]},$$scope:{ctx:C}}}),Oe=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImageImg2ImgPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L408"}}),Ke=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImageImg2ImgPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L435"}}),en=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImageImg2ImgPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L395"}}),nn=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImageImg2ImgPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L421"}}),tn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_img2img.py#L269"}}),an=new B({props:{title:"QwenImageInpaintPipeline",local:"diffusers.QwenImageInpaintPipeline",headingTag:"h2"}}),sn=new I({props:{name:"class diffusers.QwenImageInpaintPipeline",anchor:"diffusers.QwenImageInpaintPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImageInpaintPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageInpaintPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L137"}}),on=new I({props:{name:"__call__",anchor:"diffusers.QwenImageInpaintPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"masked_image_latents",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"padding_mask_crop",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": 
typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.QwenImageInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is enabled by | |
| setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale encourages to | |
| generate images that are closely linked to the text <code>prompt</code>, usually at the expense of lower image | |
| quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to mask <code>image</code>. White pixels in the mask | |
| are repainted while black pixels are preserved. If <code>mask_image</code> is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it’s a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be <code>(B, 1, H, W)</code>, <code>(B, H, W)</code>, <code>(1, H, W)</code>, <code>(H, W)</code>. And for numpy array would be for <code>(B, H, W, 1)</code>, <code>(B, H, W)</code>, <code>(H, W, 1)</code>, or <code>(H, W)</code>.`,name:"mask_image"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.mask_image_latent",description:`<strong>mask_image_latent</strong> (<code>torch.Tensor</code>, <code>List[torch.Tensor]</code>) — | |
| <code>Tensor</code> representing an image batch to mask <code>image</code> generated by VAE. If not provided, the mask | |
| latents tensor will be generated by <code>mask_image</code>.`,name:"mask_image_latent"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The size of margin in the crop to be applied to the image and masking. If <code>None</code>, no crop is applied to | |
| image and mask_image. If <code>padding_mask_crop</code> is not <code>None</code>, it will first find a rectangular region | |
| with the same aspect ration of the image and contains all masked area, and then expand that area based | |
| on <code>padding_mask_crop</code>. The image and mask_image will then be cropped based on the expanded area before | |
| resizing to the original image size for inpainting. This is useful when the masked area is small while | |
| the image is large and contain information irrelevant for inpainting, such as background.`,name:"padding_mask_crop"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageInpaintPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L635",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),de=new Dn({props:{anchor:"diffusers.QwenImageInpaintPipeline.__call__.example",$$slots:{default:[Zi]},$$scope:{ctx:C}}}),rn=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImageInpaintPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L435"}}),ln=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImageInpaintPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L462"}}),dn=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImageInpaintPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L422"}}),pn=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImageInpaintPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L448"}}),cn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageInpaintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_inpaint.py#L280"}}),mn=new B({props:{title:"QwenImageEditPipeline",local:"diffusers.QwenImageEditPipeline",headingTag:"h2"}}),gn=new I({props:{name:"class diffusers.QwenImageEditPipeline",anchor:"diffusers.QwenImageEditPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"processor",val:": Qwen2VLProcessor"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageEditPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageEditPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageEditPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageEditPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L165"}}),un=new I({props:{name:"__call__",anchor:"diffusers.QwenImageEditPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 
512"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.QwenImageEditPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageEditPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageEditPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| true_cfg_scale (<code>float</code>, <em>optional</em>, defaults to 1.0): Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free | |
| Diffusion Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of | |
| equation 2. of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is | |
| enabled by setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale | |
| encourages to generate images that are closely linked to the text <code>prompt</code>, usually at the expense of | |
| lower image quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageEditPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageEditPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageEditPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageEditPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageEditPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageEditPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageEditPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageEditPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageEditPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageEditPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageEditPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageEditPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageEditPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageEditPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageEditPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L546",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),ue=new Dn({props:{anchor:"diffusers.QwenImageEditPipeline.__call__.example",$$slots:{default:[Wi]},$$scope:{ctx:C}}}),fn=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImageEditPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L431"}}),hn=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImageEditPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L458"}}),_n=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImageEditPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L418"}}),wn=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImageEditPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L444"}}),bn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageEditPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"image",val:": typing.Optional[torch.Tensor] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageEditPipeline.encode_prompt.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| image to be encoded`,name:"image"},{anchor:"diffusers.QwenImageEditPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageEditPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py#L273"}}),yn=new B({props:{title:"QwenImageEditInpaintPipeline",local:"diffusers.QwenImageEditInpaintPipeline",headingTag:"h2"}}),In=new I({props:{name:"class diffusers.QwenImageEditInpaintPipeline",anchor:"diffusers.QwenImageEditInpaintPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"processor",val:": Qwen2VLProcessor"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImageEditInpaintPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageEditInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageEditInpaintPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageEditInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageEditInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L167"}}),vn=new I({props:{name:"__call__",anchor:"diffusers.QwenImageEditInpaintPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"masked_image_latents",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"padding_mask_crop",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": 
typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| true_cfg_scale (<code>float</code>, <em>optional</em>, defaults to 1.0): Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free | |
| Diffusion Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of | |
| equation 2. of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is | |
| enabled by setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale | |
| encourages to generate images that are closely linked to the text <code>prompt</code>, usually at the expense of | |
| lower image quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to mask <code>image</code>. White pixels in the mask | |
| are repainted while black pixels are preserved. If <code>mask_image</code> is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it’s a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be <code>(B, 1, H, W)</code>, <code>(B, H, W)</code>, <code>(1, H, W)</code>, <code>(H, W)</code>. And for numpy array would be for <code>(B, H, W, 1)</code>, <code>(B, H, W)</code>, <code>(H, W, 1)</code>, or <code>(H, W)</code>.`,name:"mask_image"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.mask_image_latent",description:`<strong>mask_image_latent</strong> (<code>torch.Tensor</code>, <code>List[torch.Tensor]</code>) — | |
| <code>Tensor</code> representing an image batch to mask <code>image</code> generated by VAE. If not provided, the mask | |
| latents tensor will ge generated by <code>mask_image</code>.`,name:"mask_image_latent"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The size of margin in the crop to be applied to the image and masking. If <code>None</code>, no crop is applied to | |
| image and mask_image. If <code>padding_mask_crop</code> is not <code>None</code>, it will first find a rectangular region | |
| with the same aspect ratio of the image and contains all masked area, and then expand that area based | |
| on <code>padding_mask_crop</code>. The image and mask_image will then be cropped based on the expanded area before | |
| resizing to the original image size for inpainting. This is useful when the masked area is small while | |
| the image is large and contain information irrelevant for inpainting, such as background.`,name:"padding_mask_crop"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L679",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),be=new Dn({props:{anchor:"diffusers.QwenImageEditInpaintPipeline.__call__.example",$$slots:{default:[Ei]},$$scope:{ctx:C}}}),Mn=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImageEditInpaintPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L477"}}),Tn=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImageEditInpaintPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L504"}}),xn=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImageEditInpaintPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L464"}}),Jn=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImageEditInpaintPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L490"}}),Qn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"image",val:": typing.Optional[torch.Tensor] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| image to be encoded`,name:"image"},{anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_inpaint.py#L285"}}),kn=new B({props:{title:"QwenImageControlNetPipeline",local:"diffusers.QwenImageControlNetPipeline",headingTag:"h2"}}),Un=new I({props:{name:"class diffusers.QwenImageControlNetPipeline",anchor:"diffusers.QwenImageControlNetPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"transformer",val:": QwenImageTransformer2DModel"},{name:"controlnet",val:": typing.Union[diffusers.models.controlnets.controlnet_qwenimage.QwenImageControlNetModel, diffusers.models.controlnets.controlnet_qwenimage.QwenImageMultiControlNetModel]"}],parametersDescription:[{anchor:"diffusers.QwenImageControlNetPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageControlNetPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageControlNetPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageControlNetPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageControlNetPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L192"}}),jn=new I({props:{name:"__call__",anchor:"diffusers.QwenImageControlNetPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"control_guidance_start",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"control_guidance_end",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"controlnet_conditioning_scale",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = 
None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.QwenImageControlNetPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is enabled by | |
| setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale encourages to | |
| generate images that are closely linked to the text <code>prompt</code>, usually at the expense of lower image | |
| quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generated image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that is called at the end of each denoising step during inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageControlNetPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L551",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Te=new Dn({props:{anchor:"diffusers.QwenImageControlNetPipeline.__call__.example",$$slots:{default:[Li]},$$scope:{ctx:C}}}),$n=new I({props:{name:"disable_vae_slicing",anchor:"diffusers.QwenImageControlNetPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L423"}}),Pn=new I({props:{name:"disable_vae_tiling",anchor:"diffusers.QwenImageControlNetPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L450"}}),Cn=new I({props:{name:"enable_vae_slicing",anchor:"diffusers.QwenImageControlNetPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L410"}}),Zn=new I({props:{name:"enable_vae_tiling",anchor:"diffusers.QwenImageControlNetPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L436"}}),Wn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageControlNetPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageControlNetPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageControlNetPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageControlNetPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageControlNetPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py#L291"}}),En=new B({props:{title:"QwenImageEditPlusPipeline",local:"diffusers.QwenImageEditPlusPipeline",headingTag:"h2"}}),Ln=new I({props:{name:"class diffusers.QwenImageEditPlusPipeline",anchor:"diffusers.QwenImageEditPlusPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKLQwenImage"},{name:"text_encoder",val:": Qwen2_5_VLForConditionalGeneration"},{name:"tokenizer",val:": Qwen2Tokenizer"},{name:"processor",val:": Qwen2VLProcessor"},{name:"transformer",val:": QwenImageTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPlusPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/qwenimage_transformer2d#diffusers.QwenImageTransformer2DModel">QwenImageTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.QwenImageEditPlusPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12448/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.QwenImageEditPlusPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12448/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.QwenImageEditPlusPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>Qwen2.5-VL-7B-Instruct</code>) — | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a>, specifically the | |
| <a href="https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct" rel="nofollow">Qwen2.5-VL-7B-Instruct</a> variant.`,name:"text_encoder"},{anchor:"diffusers.QwenImageEditPlusPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>QwenTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py#L168"}}),Nn=new I({props:{name:"__call__",anchor:"diffusers.QwenImageEditPlusPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"true_cfg_scale",val:": float = 4.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": typing.Optional[float] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 
512"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| true_cfg_scale (<code>float</code>, <em>optional</em>, defaults to 1.0): Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free | |
| Diffusion Guidance</a>. <code>true_cfg_scale</code> is defined as <code>w</code> of | |
| equation 2. of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Classifier-free guidance is | |
| enabled by setting <code>true_cfg_scale > 1</code> and a provided <code>negative_prompt</code>. Higher guidance scale | |
| encourages to generate images that are closely linked to the text <code>prompt</code>, usually at the expense of | |
| lower image quality.`,name:"true_cfg_scale"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to None) — | |
| A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance | |
| where the guidance scale is applied during inference through noise prediction rescaling, guidance | |
| distilled models take the guidance scale directly as an input parameter during forward pass. Guidance | |
| scale is enabled by setting <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images | |
| that are closely linked to the text <code>prompt</code>, usually at the expense of lower image quality. This | |
| parameter in the pipeline is there to support future guidance-distilled models when they come up. It is | |
| ignored when not using guidance distilled models. To enable traditional classifier-free guidance, | |
| please pass <code>true_cfg_scale > 1.0</code> and <code>negative_prompt</code> (even an empty negative prompt like ” ” should | |
| enable classifier-free guidance computations).`,name:"guidance_scale"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.qwenimage.QwenImagePipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py#L515",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.qwenimage.QwenImagePipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Ue=new Dn({props:{anchor:"diffusers.QwenImageEditPlusPipeline.__call__.example",$$slots:{default:[Ni]},$$scope:{ctx:C}}}),Gn=new I({props:{name:"encode_prompt",anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"image",val:": typing.Optional[torch.Tensor] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"max_sequence_length",val:": int = 1024"}],parametersDescription:[{anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| image to be encoded`,name:"image"},{anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.QwenImageEditPlusPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit_plus.py#L287"}}),Bn=new B({props:{title:"QwenImagePipelineOutput",local:"diffusers.pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput",headingTag:"h2"}}),Vn=new I({props:{name:"class diffusers.pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput",anchor:"diffusers.pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"}],parametersDescription:[{anchor:"diffusers.pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or numpy array of shape <code>(batch_size, height, width, num_channels)</code>. PIL images or numpy array present the denoised images of the diffusion pipeline.`,name:"images"}],source:"https://github.com/huggingface/diffusers/blob/vr_12448/src/diffusers/pipelines/qwenimage/pipeline_output.py#L11"}}),qn=new $i({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/qwenimage.md"}}),{c(){f=i("meta"),x=t(),M=i("p"),_=t(),d(v.$$.fragment),o=t(),T=i("div"),T.innerHTML=xo,At=t(),$e=i("p"),$e.textContent=Jo,St=t(),Pe=i("p"),Pe.textContent=Qo,Ot=t(),Ce=i("table"),Ce.innerHTML=ko,Kt=t(),Ze=i("blockquote"),Ze.innerHTML=Uo,ea=t(),d(We.$$.fragment),na=t(),Ee=i("p"),Ee.innerHTML=jo,ta=t(),S=i("details"),An=i("summary"),An.textContent=$o,Ya=t(),d(Le.$$.fragment),aa=t(),O=i("blockquote"),O.innerHTML=Po,sa=t(),d(Ne.$$.fragment),oa=t(),Ge=i("p"),Ge.innerHTML=Co,ia=t(),d(Be.$$.fragment),ra=t(),d(Ve.$$.fragment),la=t(),J=i("div"),d(qe.$$.fragment),Da=t(),Sn=i("p"),Sn.textContent=Zo,za=t(),V=i("div"),d(Re.$$.fragment),Aa=t(),On=i("p"),On.textContent=Wo,Sa=t(),d(K.$$.fragment),Oa=t(),ee=i("div"),d(Xe.$$.fragment),Ka=t(),Kn=i("p"),Kn.innerHTML=Eo,es=t(),ne=i("div"),d(Fe.$$.fragment),ns=t(),et=i("p"),et.innerHTML=Lo,ts=t(),te=i("div"),d(He.$$.fragment),as=t(),nt=i("p"),nt.textContent=No,ss=t(),ae=i("div"),d(Ye.$$.fragment),os=t(),tt=i("p"),tt.textContent=Go,is=t(),at=i("div"),d(De.$$.fragment),da=t(),d(ze.$$.fragment),pa=t(),Q=i("div"),d(Ae.$$.fragment),rs=t(),st=i("p"),st.textContent=Bo,ls=t(),q=i("div"),d(Se.$$.fragment),ds=t(),ot=i("p"),ot.textContent=Vo,ps=t(),d(se.$$.fragment),cs=t(),oe=i("div"),d(Oe.$$.fragment),ms=t(),it=i("p"),it.innerHTML=qo,gs=t(),ie=i("div"),d(Ke.$$.fragment),us=t(),rt=i("p"),rt.innerHTML=Ro,fs=t(),re=i("div"),d(en.$$.fragment),hs=t(),lt=i("p"),lt.textContent=Xo,_s=t(),le=i("div"),d(nn.$$.fragment),ws=t(),dt=i("p"),dt.textContent=Fo,bs=t(),pt=i("div"),d(tn.$
$.fragment),ca=t(),d(an.$$.fragment),ma=t(),k=i("div"),d(sn.$$.fragment),ys=t(),ct=i("p"),ct.textContent=Ho,Is=t(),R=i("div"),d(on.$$.fragment),vs=t(),mt=i("p"),mt.textContent=Yo,Ms=t(),d(de.$$.fragment),Ts=t(),pe=i("div"),d(rn.$$.fragment),xs=t(),gt=i("p"),gt.innerHTML=Do,Js=t(),ce=i("div"),d(ln.$$.fragment),Qs=t(),ut=i("p"),ut.innerHTML=zo,ks=t(),me=i("div"),d(dn.$$.fragment),Us=t(),ft=i("p"),ft.textContent=Ao,js=t(),ge=i("div"),d(pn.$$.fragment),$s=t(),ht=i("p"),ht.textContent=So,Ps=t(),_t=i("div"),d(cn.$$.fragment),ga=t(),d(mn.$$.fragment),ua=t(),U=i("div"),d(gn.$$.fragment),Cs=t(),wt=i("p"),wt.textContent=Oo,Zs=t(),X=i("div"),d(un.$$.fragment),Ws=t(),bt=i("p"),bt.textContent=Ko,Es=t(),d(ue.$$.fragment),Ls=t(),fe=i("div"),d(fn.$$.fragment),Ns=t(),yt=i("p"),yt.innerHTML=ei,Gs=t(),he=i("div"),d(hn.$$.fragment),Bs=t(),It=i("p"),It.innerHTML=ni,Vs=t(),_e=i("div"),d(_n.$$.fragment),qs=t(),vt=i("p"),vt.textContent=ti,Rs=t(),we=i("div"),d(wn.$$.fragment),Xs=t(),Mt=i("p"),Mt.textContent=ai,Fs=t(),Tt=i("div"),d(bn.$$.fragment),fa=t(),d(yn.$$.fragment),ha=t(),j=i("div"),d(In.$$.fragment),Hs=t(),xt=i("p"),xt.textContent=si,Ys=t(),F=i("div"),d(vn.$$.fragment),Ds=t(),Jt=i("p"),Jt.textContent=oi,zs=t(),d(be.$$.fragment),As=t(),ye=i("div"),d(Mn.$$.fragment),Ss=t(),Qt=i("p"),Qt.innerHTML=ii,Os=t(),Ie=i("div"),d(Tn.$$.fragment),Ks=t(),kt=i("p"),kt.innerHTML=ri,eo=t(),ve=i("div"),d(xn.$$.fragment),no=t(),Ut=i("p"),Ut.textContent=li,to=t(),Me=i("div"),d(Jn.$$.fragment),ao=t(),jt=i("p"),jt.textContent=di,so=t(),$t=i("div"),d(Qn.$$.fragment),_a=t(),d(kn.$$.fragment),wa=t(),$=i("div"),d(Un.$$.fragment),oo=t(),Pt=i("p"),Pt.textContent=pi,io=t(),H=i("div"),d(jn.$$.fragment),ro=t(),Ct=i("p"),Ct.textContent=ci,lo=t(),d(Te.$$.fragment),po=t(),xe=i("div"),d($n.$$.fragment),co=t(),Zt=i("p"),Zt.innerHTML=mi,mo=t(),Je=i("div"),d(Pn.$$.fragment),go=t(),Wt=i("p"),Wt.innerHTML=gi,uo=t(),Qe=i("div"),d(Cn.$$.fragment),fo=t(),Et=i("p"),Et.textContent=ui,ho=t(),ke=i("div"),d(Zn.$$.fragment),_o=t(),L
t=i("p"),Lt.textContent=fi,wo=t(),Nt=i("div"),d(Wn.$$.fragment),ba=t(),d(En.$$.fragment),ya=t(),G=i("div"),d(Ln.$$.fragment),bo=t(),Gt=i("p"),Gt.textContent=hi,yo=t(),Y=i("div"),d(Nn.$$.fragment),Io=t(),Bt=i("p"),Bt.textContent=_i,vo=t(),d(Ue.$$.fragment),Mo=t(),Vt=i("div"),d(Gn.$$.fragment),Ia=t(),d(Bn.$$.fragment),va=t(),D=i("div"),d(Vn.$$.fragment),To=t(),qt=i("p"),qt.textContent=wi,Ma=t(),d(qn.$$.fragment),Ta=t(),zt=i("p"),this.h()},l(e){const l=ji("svelte-u9bgzb",document.head);f=r(l,"META",{name:!0,content:!0}),l.forEach(s),x=a(e),M=r(e,"P",{}),y(M).forEach(s),_=a(e),p(v.$$.fragment,e),o=a(e),T=r(e,"DIV",{class:!0,"data-svelte-h":!0}),w(T)!=="svelte-si9ct8"&&(T.innerHTML=xo),At=a(e),$e=r(e,"P",{"data-svelte-h":!0}),w($e)!=="svelte-f9n2hx"&&($e.textContent=Jo),St=a(e),Pe=r(e,"P",{"data-svelte-h":!0}),w(Pe)!=="svelte-1t9xj8p"&&(Pe.textContent=Qo),Ot=a(e),Ce=r(e,"TABLE",{"data-svelte-h":!0}),w(Ce)!=="svelte-rj2irp"&&(Ce.innerHTML=ko),Kt=a(e),Ze=r(e,"BLOCKQUOTE",{"data-svelte-h":!0}),w(Ze)!=="svelte-41zj9c"&&(Ze.innerHTML=Uo),ea=a(e),p(We.$$.fragment,e),na=a(e),Ee=r(e,"P",{"data-svelte-h":!0}),w(Ee)!=="svelte-upho6n"&&(Ee.innerHTML=jo),ta=a(e),S=r(e,"DETAILS",{});var Rn=y(S);An=r(Rn,"SUMMARY",{"data-svelte-h":!0}),w(An)!=="svelte-14caxiv"&&(An.textContent=$o),Ya=a(Rn),p(Le.$$.fragment,Rn),Rn.forEach(s),aa=a(e),O=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),w(O)!=="svelte-1irgdn5"&&(O.innerHTML=Po),sa=a(e),p(Ne.$$.fragment,e),oa=a(e),Ge=r(e,"P",{"data-svelte-h":!0}),w(Ge)!=="svelte-1f73ske"&&(Ge.innerHTML=Co),ia=a(e),p(Be.$$.fragment,e),ra=a(e),p(Ve.$$.fragment,e),la=a(e),J=r(e,"DIV",{class:!0});var P=y(J);p(qe.$$.fragment,P),Da=a(P),Sn=r(P,"P",{"data-svelte-h":!0}),w(Sn)!=="svelte-1b1rx25"&&(Sn.textContent=Zo),za=a(P),V=r(P,"DIV",{class:!0});var z=y(V);p(Re.$$.fragment,z),Aa=a(z),On=r(z,"P",{"data-svelte-h":!0}),w(On)!=="svelte-v78lg8"&&(On.textContent=Wo),Sa=a(z),p(K.$$.fragment,z),z.forEach(s),Oa=a(P),ee=r(P,"DIV",{class:!0});var 
Xn=y(ee);p(Xe.$$.fragment,Xn),Ka=a(Xn),Kn=r(Xn,"P",{"data-svelte-h":!0}),w(Kn)!=="svelte-1s3c06i"&&(Kn.innerHTML=Eo),Xn.forEach(s),es=a(P),ne=r(P,"DIV",{class:!0});var Fn=y(ne);p(Fe.$$.fragment,Fn),ns=a(Fn),et=r(Fn,"P",{"data-svelte-h":!0}),w(et)!=="svelte-pkn4ui"&&(et.innerHTML=Lo),Fn.forEach(s),ts=a(P),te=r(P,"DIV",{class:!0});var Hn=y(te);p(He.$$.fragment,Hn),as=a(Hn),nt=r(Hn,"P",{"data-svelte-h":!0}),w(nt)!=="svelte-14bnrb6"&&(nt.textContent=No),Hn.forEach(s),ss=a(P),ae=r(P,"DIV",{class:!0});var Yn=y(ae);p(Ye.$$.fragment,Yn),os=a(Yn),tt=r(Yn,"P",{"data-svelte-h":!0}),w(tt)!=="svelte-1xwrf7t"&&(tt.textContent=Go),Yn.forEach(s),is=a(P),at=r(P,"DIV",{class:!0});var bi=y(at);p(De.$$.fragment,bi),bi.forEach(s),P.forEach(s),da=a(e),p(ze.$$.fragment,e),pa=a(e),Q=r(e,"DIV",{class:!0});var Z=y(Q);p(Ae.$$.fragment,Z),rs=a(Z),st=r(Z,"P",{"data-svelte-h":!0}),w(st)!=="svelte-1b1rx25"&&(st.textContent=Bo),ls=a(Z),q=r(Z,"DIV",{class:!0});var Rt=y(q);p(Se.$$.fragment,Rt),ds=a(Rt),ot=r(Rt,"P",{"data-svelte-h":!0}),w(ot)!=="svelte-v78lg8"&&(ot.textContent=Vo),ps=a(Rt),p(se.$$.fragment,Rt),Rt.forEach(s),cs=a(Z),oe=r(Z,"DIV",{class:!0});var Ja=y(oe);p(Oe.$$.fragment,Ja),ms=a(Ja),it=r(Ja,"P",{"data-svelte-h":!0}),w(it)!=="svelte-1s3c06i"&&(it.innerHTML=qo),Ja.forEach(s),gs=a(Z),ie=r(Z,"DIV",{class:!0});var Qa=y(ie);p(Ke.$$.fragment,Qa),us=a(Qa),rt=r(Qa,"P",{"data-svelte-h":!0}),w(rt)!=="svelte-pkn4ui"&&(rt.innerHTML=Ro),Qa.forEach(s),fs=a(Z),re=r(Z,"DIV",{class:!0});var ka=y(re);p(en.$$.fragment,ka),hs=a(ka),lt=r(ka,"P",{"data-svelte-h":!0}),w(lt)!=="svelte-14bnrb6"&&(lt.textContent=Xo),ka.forEach(s),_s=a(Z),le=r(Z,"DIV",{class:!0});var Ua=y(le);p(nn.$$.fragment,Ua),ws=a(Ua),dt=r(Ua,"P",{"data-svelte-h":!0}),w(dt)!=="svelte-1xwrf7t"&&(dt.textContent=Fo),Ua.forEach(s),bs=a(Z),pt=r(Z,"DIV",{class:!0});var yi=y(pt);p(tn.$$.fragment,yi),yi.forEach(s),Z.forEach(s),ca=a(e),p(an.$$.fragment,e),ma=a(e),k=r(e,"DIV",{class:!0});var 
W=y(k);p(sn.$$.fragment,W),ys=a(W),ct=r(W,"P",{"data-svelte-h":!0}),w(ct)!=="svelte-1b1rx25"&&(ct.textContent=Ho),Is=a(W),R=r(W,"DIV",{class:!0});var Xt=y(R);p(on.$$.fragment,Xt),vs=a(Xt),mt=r(Xt,"P",{"data-svelte-h":!0}),w(mt)!=="svelte-v78lg8"&&(mt.textContent=Yo),Ms=a(Xt),p(de.$$.fragment,Xt),Xt.forEach(s),Ts=a(W),pe=r(W,"DIV",{class:!0});var ja=y(pe);p(rn.$$.fragment,ja),xs=a(ja),gt=r(ja,"P",{"data-svelte-h":!0}),w(gt)!=="svelte-1s3c06i"&&(gt.innerHTML=Do),ja.forEach(s),Js=a(W),ce=r(W,"DIV",{class:!0});var $a=y(ce);p(ln.$$.fragment,$a),Qs=a($a),ut=r($a,"P",{"data-svelte-h":!0}),w(ut)!=="svelte-pkn4ui"&&(ut.innerHTML=zo),$a.forEach(s),ks=a(W),me=r(W,"DIV",{class:!0});var Pa=y(me);p(dn.$$.fragment,Pa),Us=a(Pa),ft=r(Pa,"P",{"data-svelte-h":!0}),w(ft)!=="svelte-14bnrb6"&&(ft.textContent=Ao),Pa.forEach(s),js=a(W),ge=r(W,"DIV",{class:!0});var Ca=y(ge);p(pn.$$.fragment,Ca),$s=a(Ca),ht=r(Ca,"P",{"data-svelte-h":!0}),w(ht)!=="svelte-1xwrf7t"&&(ht.textContent=So),Ca.forEach(s),Ps=a(W),_t=r(W,"DIV",{class:!0});var Ii=y(_t);p(cn.$$.fragment,Ii),Ii.forEach(s),W.forEach(s),ga=a(e),p(mn.$$.fragment,e),ua=a(e),U=r(e,"DIV",{class:!0});var E=y(U);p(gn.$$.fragment,E),Cs=a(E),wt=r(E,"P",{"data-svelte-h":!0}),w(wt)!=="svelte-cjiar3"&&(wt.textContent=Oo),Zs=a(E),X=r(E,"DIV",{class:!0});var Ft=y(X);p(un.$$.fragment,Ft),Ws=a(Ft),bt=r(Ft,"P",{"data-svelte-h":!0}),w(bt)!=="svelte-v78lg8"&&(bt.textContent=Ko),Es=a(Ft),p(ue.$$.fragment,Ft),Ft.forEach(s),Ls=a(E),fe=r(E,"DIV",{class:!0});var Za=y(fe);p(fn.$$.fragment,Za),Ns=a(Za),yt=r(Za,"P",{"data-svelte-h":!0}),w(yt)!=="svelte-1s3c06i"&&(yt.innerHTML=ei),Za.forEach(s),Gs=a(E),he=r(E,"DIV",{class:!0});var Wa=y(he);p(hn.$$.fragment,Wa),Bs=a(Wa),It=r(Wa,"P",{"data-svelte-h":!0}),w(It)!=="svelte-pkn4ui"&&(It.innerHTML=ni),Wa.forEach(s),Vs=a(E),_e=r(E,"DIV",{class:!0});var Ea=y(_e);p(_n.$$.fragment,Ea),qs=a(Ea),vt=r(Ea,"P",{"data-svelte-h":!0}),w(vt)!=="svelte-14bnrb6"&&(vt.textContent=ti),Ea.forEach(s),Rs=a(E),we=r(E,"DIV",{class:!0});var 
La=y(we);p(wn.$$.fragment,La),Xs=a(La),Mt=r(La,"P",{"data-svelte-h":!0}),w(Mt)!=="svelte-1xwrf7t"&&(Mt.textContent=ai),La.forEach(s),Fs=a(E),Tt=r(E,"DIV",{class:!0});var vi=y(Tt);p(bn.$$.fragment,vi),vi.forEach(s),E.forEach(s),fa=a(e),p(yn.$$.fragment,e),ha=a(e),j=r(e,"DIV",{class:!0});var L=y(j);p(In.$$.fragment,L),Hs=a(L),xt=r(L,"P",{"data-svelte-h":!0}),w(xt)!=="svelte-cjiar3"&&(xt.textContent=si),Ys=a(L),F=r(L,"DIV",{class:!0});var Ht=y(F);p(vn.$$.fragment,Ht),Ds=a(Ht),Jt=r(Ht,"P",{"data-svelte-h":!0}),w(Jt)!=="svelte-v78lg8"&&(Jt.textContent=oi),zs=a(Ht),p(be.$$.fragment,Ht),Ht.forEach(s),As=a(L),ye=r(L,"DIV",{class:!0});var Na=y(ye);p(Mn.$$.fragment,Na),Ss=a(Na),Qt=r(Na,"P",{"data-svelte-h":!0}),w(Qt)!=="svelte-1s3c06i"&&(Qt.innerHTML=ii),Na.forEach(s),Os=a(L),Ie=r(L,"DIV",{class:!0});var Ga=y(Ie);p(Tn.$$.fragment,Ga),Ks=a(Ga),kt=r(Ga,"P",{"data-svelte-h":!0}),w(kt)!=="svelte-pkn4ui"&&(kt.innerHTML=ri),Ga.forEach(s),eo=a(L),ve=r(L,"DIV",{class:!0});var Ba=y(ve);p(xn.$$.fragment,Ba),no=a(Ba),Ut=r(Ba,"P",{"data-svelte-h":!0}),w(Ut)!=="svelte-14bnrb6"&&(Ut.textContent=li),Ba.forEach(s),to=a(L),Me=r(L,"DIV",{class:!0});var Va=y(Me);p(Jn.$$.fragment,Va),ao=a(Va),jt=r(Va,"P",{"data-svelte-h":!0}),w(jt)!=="svelte-1xwrf7t"&&(jt.textContent=di),Va.forEach(s),so=a(L),$t=r(L,"DIV",{class:!0});var Mi=y($t);p(Qn.$$.fragment,Mi),Mi.forEach(s),L.forEach(s),_a=a(e),p(kn.$$.fragment,e),wa=a(e),$=r(e,"DIV",{class:!0});var N=y($);p(Un.$$.fragment,N),oo=a(N),Pt=r(N,"P",{"data-svelte-h":!0}),w(Pt)!=="svelte-1b1rx25"&&(Pt.textContent=pi),io=a(N),H=r(N,"DIV",{class:!0});var Yt=y(H);p(jn.$$.fragment,Yt),ro=a(Yt),Ct=r(Yt,"P",{"data-svelte-h":!0}),w(Ct)!=="svelte-v78lg8"&&(Ct.textContent=ci),lo=a(Yt),p(Te.$$.fragment,Yt),Yt.forEach(s),po=a(N),xe=r(N,"DIV",{class:!0});var qa=y(xe);p($n.$$.fragment,qa),co=a(qa),Zt=r(qa,"P",{"data-svelte-h":!0}),w(Zt)!=="svelte-1s3c06i"&&(Zt.innerHTML=mi),qa.forEach(s),mo=a(N),Je=r(N,"DIV",{class:!0});var 
Ra=y(Je);p(Pn.$$.fragment,Ra),go=a(Ra),Wt=r(Ra,"P",{"data-svelte-h":!0}),w(Wt)!=="svelte-pkn4ui"&&(Wt.innerHTML=gi),Ra.forEach(s),uo=a(N),Qe=r(N,"DIV",{class:!0});var Xa=y(Qe);p(Cn.$$.fragment,Xa),fo=a(Xa),Et=r(Xa,"P",{"data-svelte-h":!0}),w(Et)!=="svelte-14bnrb6"&&(Et.textContent=ui),Xa.forEach(s),ho=a(N),ke=r(N,"DIV",{class:!0});var Fa=y(ke);p(Zn.$$.fragment,Fa),_o=a(Fa),Lt=r(Fa,"P",{"data-svelte-h":!0}),w(Lt)!=="svelte-1xwrf7t"&&(Lt.textContent=fi),Fa.forEach(s),wo=a(N),Nt=r(N,"DIV",{class:!0});var Ti=y(Nt);p(Wn.$$.fragment,Ti),Ti.forEach(s),N.forEach(s),ba=a(e),p(En.$$.fragment,e),ya=a(e),G=r(e,"DIV",{class:!0});var je=y(G);p(Ln.$$.fragment,je),bo=a(je),Gt=r(je,"P",{"data-svelte-h":!0}),w(Gt)!=="svelte-cjiar3"&&(Gt.textContent=hi),yo=a(je),Y=r(je,"DIV",{class:!0});var Dt=y(Y);p(Nn.$$.fragment,Dt),Io=a(Dt),Bt=r(Dt,"P",{"data-svelte-h":!0}),w(Bt)!=="svelte-v78lg8"&&(Bt.textContent=_i),vo=a(Dt),p(Ue.$$.fragment,Dt),Dt.forEach(s),Mo=a(je),Vt=r(je,"DIV",{class:!0});var xi=y(Vt);p(Gn.$$.fragment,xi),xi.forEach(s),je.forEach(s),Ia=a(e),p(Bn.$$.fragment,e),va=a(e),D=r(e,"DIV",{class:!0});var Ha=y(D);p(Vn.$$.fragment,Ha),To=a(Ha),qt=r(Ha,"P",{"data-svelte-h":!0}),w(qt)!=="svelte-1qpjiuf"&&(qt.textContent=wi),Ha.forEach(s),Ma=a(e),p(qn.$$.fragment,e),Ta=a(e),zt=r(e,"P",{}),y(zt).forEach(s),this.h()},h(){b(f,"name","hf:doc:metadata"),b(f,"content",Bi),b(T,"class","flex flex-wrap space-x-1"),b(O,"class","tip"),b(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ee,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ne,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(te,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ae,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(at,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),b(J,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(oe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(re,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(le,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(pt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(pe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(_t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(he,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(we,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8"),b(Tt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Ie,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b($t,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(xe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Qe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(ke,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Nt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(Vt,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(G,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),b(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 
rounded-tl-xl mb-6 mt-8")},m(e,l){n(document.head,f),h(e,x,l),h(e,M,l),h(e,_,l),c(v,e,l),h(e,o,l),h(e,T,l),h(e,At,l),h(e,$e,l),h(e,St,l),h(e,Pe,l),h(e,Ot,l),h(e,Ce,l),h(e,Kt,l),h(e,Ze,l),h(e,ea,l),c(We,e,l),h(e,na,l),h(e,Ee,l),h(e,ta,l),h(e,S,l),n(S,An),n(S,Ya),c(Le,S,null),h(e,aa,l),h(e,O,l),h(e,sa,l),c(Ne,e,l),h(e,oa,l),h(e,Ge,l),h(e,ia,l),c(Be,e,l),h(e,ra,l),c(Ve,e,l),h(e,la,l),h(e,J,l),c(qe,J,null),n(J,Da),n(J,Sn),n(J,za),n(J,V),c(Re,V,null),n(V,Aa),n(V,On),n(V,Sa),c(K,V,null),n(J,Oa),n(J,ee),c(Xe,ee,null),n(ee,Ka),n(ee,Kn),n(J,es),n(J,ne),c(Fe,ne,null),n(ne,ns),n(ne,et),n(J,ts),n(J,te),c(He,te,null),n(te,as),n(te,nt),n(J,ss),n(J,ae),c(Ye,ae,null),n(ae,os),n(ae,tt),n(J,is),n(J,at),c(De,at,null),h(e,da,l),c(ze,e,l),h(e,pa,l),h(e,Q,l),c(Ae,Q,null),n(Q,rs),n(Q,st),n(Q,ls),n(Q,q),c(Se,q,null),n(q,ds),n(q,ot),n(q,ps),c(se,q,null),n(Q,cs),n(Q,oe),c(Oe,oe,null),n(oe,ms),n(oe,it),n(Q,gs),n(Q,ie),c(Ke,ie,null),n(ie,us),n(ie,rt),n(Q,fs),n(Q,re),c(en,re,null),n(re,hs),n(re,lt),n(Q,_s),n(Q,le),c(nn,le,null),n(le,ws),n(le,dt),n(Q,bs),n(Q,pt),c(tn,pt,null),h(e,ca,l),c(an,e,l),h(e,ma,l),h(e,k,l),c(sn,k,null),n(k,ys),n(k,ct),n(k,Is),n(k,R),c(on,R,null),n(R,vs),n(R,mt),n(R,Ms),c(de,R,null),n(k,Ts),n(k,pe),c(rn,pe,null),n(pe,xs),n(pe,gt),n(k,Js),n(k,ce),c(ln,ce,null),n(ce,Qs),n(ce,ut),n(k,ks),n(k,me),c(dn,me,null),n(me,Us),n(me,ft),n(k,js),n(k,ge),c(pn,ge,null),n(ge,$s),n(ge,ht),n(k,Ps),n(k,_t),c(cn,_t,null),h(e,ga,l),c(mn,e,l),h(e,ua,l),h(e,U,l),c(gn,U,null),n(U,Cs),n(U,wt),n(U,Zs),n(U,X),c(un,X,null),n(X,Ws),n(X,bt),n(X,Es),c(ue,X,null),n(U,Ls),n(U,fe),c(fn,fe,null),n(fe,Ns),n(fe,yt),n(U,Gs),n(U,he),c(hn,he,null),n(he,Bs),n(he,It),n(U,Vs),n(U,_e),c(_n,_e,null),n(_e,qs),n(_e,vt),n(U,Rs),n(U,we),c(wn,we,null),n(we,Xs),n(we,Mt),n(U,Fs),n(U,Tt),c(bn,Tt,null),h(e,fa,l),c(yn,e,l),h(e,ha,l),h(e,j,l),c(In,j,null),n(j,Hs),n(j,xt),n(j,Ys),n(j,F),c(vn,F,null),n(F,Ds),n(F,Jt),n(F,zs),c(be,F,null),n(j,As),n(j,ye),c(Mn,ye,null),n(ye,Ss),n(ye,Qt),n(j,Os),n(j,Ie),c(Tn,Ie,null),n(Ie,Ks),n(Ie,kt
),n(j,eo),n(j,ve),c(xn,ve,null),n(ve,no),n(ve,Ut),n(j,to),n(j,Me),c(Jn,Me,null),n(Me,ao),n(Me,jt),n(j,so),n(j,$t),c(Qn,$t,null),h(e,_a,l),c(kn,e,l),h(e,wa,l),h(e,$,l),c(Un,$,null),n($,oo),n($,Pt),n($,io),n($,H),c(jn,H,null),n(H,ro),n(H,Ct),n(H,lo),c(Te,H,null),n($,po),n($,xe),c($n,xe,null),n(xe,co),n(xe,Zt),n($,mo),n($,Je),c(Pn,Je,null),n(Je,go),n(Je,Wt),n($,uo),n($,Qe),c(Cn,Qe,null),n(Qe,fo),n(Qe,Et),n($,ho),n($,ke),c(Zn,ke,null),n(ke,_o),n(ke,Lt),n($,wo),n($,Nt),c(Wn,Nt,null),h(e,ba,l),c(En,e,l),h(e,ya,l),h(e,G,l),c(Ln,G,null),n(G,bo),n(G,Gt),n(G,yo),n(G,Y),c(Nn,Y,null),n(Y,Io),n(Y,Bt),n(Y,vo),c(Ue,Y,null),n(G,Mo),n(G,Vt),c(Gn,Vt,null),h(e,Ia,l),c(Bn,e,l),h(e,va,l),h(e,D,l),c(Vn,D,null),n(D,To),n(D,qt),h(e,Ma,l),c(qn,e,l),h(e,Ta,l),h(e,zt,l),xa=!0},p(e,[l]){const Rn={};l&2&&(Rn.$$scope={dirty:l,ctx:e}),K.$set(Rn);const P={};l&2&&(P.$$scope={dirty:l,ctx:e}),se.$set(P);const z={};l&2&&(z.$$scope={dirty:l,ctx:e}),de.$set(z);const Xn={};l&2&&(Xn.$$scope={dirty:l,ctx:e}),ue.$set(Xn);const Fn={};l&2&&(Fn.$$scope={dirty:l,ctx:e}),be.$set(Fn);const Hn={};l&2&&(Hn.$$scope={dirty:l,ctx:e}),Te.$set(Hn);const 
Yn={};l&2&&(Yn.$$scope={dirty:l,ctx:e}),Ue.$set(Yn)},i(e){xa||(m(v.$$.fragment,e),m(We.$$.fragment,e),m(Le.$$.fragment,e),m(Ne.$$.fragment,e),m(Be.$$.fragment,e),m(Ve.$$.fragment,e),m(qe.$$.fragment,e),m(Re.$$.fragment,e),m(K.$$.fragment,e),m(Xe.$$.fragment,e),m(Fe.$$.fragment,e),m(He.$$.fragment,e),m(Ye.$$.fragment,e),m(De.$$.fragment,e),m(ze.$$.fragment,e),m(Ae.$$.fragment,e),m(Se.$$.fragment,e),m(se.$$.fragment,e),m(Oe.$$.fragment,e),m(Ke.$$.fragment,e),m(en.$$.fragment,e),m(nn.$$.fragment,e),m(tn.$$.fragment,e),m(an.$$.fragment,e),m(sn.$$.fragment,e),m(on.$$.fragment,e),m(de.$$.fragment,e),m(rn.$$.fragment,e),m(ln.$$.fragment,e),m(dn.$$.fragment,e),m(pn.$$.fragment,e),m(cn.$$.fragment,e),m(mn.$$.fragment,e),m(gn.$$.fragment,e),m(un.$$.fragment,e),m(ue.$$.fragment,e),m(fn.$$.fragment,e),m(hn.$$.fragment,e),m(_n.$$.fragment,e),m(wn.$$.fragment,e),m(bn.$$.fragment,e),m(yn.$$.fragment,e),m(In.$$.fragment,e),m(vn.$$.fragment,e),m(be.$$.fragment,e),m(Mn.$$.fragment,e),m(Tn.$$.fragment,e),m(xn.$$.fragment,e),m(Jn.$$.fragment,e),m(Qn.$$.fragment,e),m(kn.$$.fragment,e),m(Un.$$.fragment,e),m(jn.$$.fragment,e),m(Te.$$.fragment,e),m($n.$$.fragment,e),m(Pn.$$.fragment,e),m(Cn.$$.fragment,e),m(Zn.$$.fragment,e),m(Wn.$$.fragment,e),m(En.$$.fragment,e),m(Ln.$$.fragment,e),m(Nn.$$.fragment,e),m(Ue.$$.fragment,e),m(Gn.$$.fragment,e),m(Bn.$$.fragment,e),m(Vn.$$.fragment,e),m(qn.$$.fragment,e),xa=!0)},o(e){g(v.$$.fragment,e),g(We.$$.fragment,e),g(Le.$$.fragment,e),g(Ne.$$.fragment,e),g(Be.$$.fragment,e),g(Ve.$$.fragment,e),g(qe.$$.fragment,e),g(Re.$$.fragment,e),g(K.$$.fragment,e),g(Xe.$$.fragment,e),g(Fe.$$.fragment,e),g(He.$$.fragment,e),g(Ye.$$.fragment,e),g(De.$$.fragment,e),g(ze.$$.fragment,e),g(Ae.$$.fragment,e),g(Se.$$.fragment,e),g(se.$$.fragment,e),g(Oe.$$.fragment,e),g(Ke.$$.fragment,e),g(en.$$.fragment,e),g(nn.$$.fragment,e),g(tn.$$.fragment,e),g(an.$$.fragment,e),g(sn.$$.fragment,e),g(on.$$.fragment,e),g(de.$$.fragment,e),g(rn.$$.fragment,e),g(ln.$$.fragment,e),g(dn.$$.
fragment,e),g(pn.$$.fragment,e),g(cn.$$.fragment,e),g(mn.$$.fragment,e),g(gn.$$.fragment,e),g(un.$$.fragment,e),g(ue.$$.fragment,e),g(fn.$$.fragment,e),g(hn.$$.fragment,e),g(_n.$$.fragment,e),g(wn.$$.fragment,e),g(bn.$$.fragment,e),g(yn.$$.fragment,e),g(In.$$.fragment,e),g(vn.$$.fragment,e),g(be.$$.fragment,e),g(Mn.$$.fragment,e),g(Tn.$$.fragment,e),g(xn.$$.fragment,e),g(Jn.$$.fragment,e),g(Qn.$$.fragment,e),g(kn.$$.fragment,e),g(Un.$$.fragment,e),g(jn.$$.fragment,e),g(Te.$$.fragment,e),g($n.$$.fragment,e),g(Pn.$$.fragment,e),g(Cn.$$.fragment,e),g(Zn.$$.fragment,e),g(Wn.$$.fragment,e),g(En.$$.fragment,e),g(Ln.$$.fragment,e),g(Nn.$$.fragment,e),g(Ue.$$.fragment,e),g(Gn.$$.fragment,e),g(Bn.$$.fragment,e),g(Vn.$$.fragment,e),g(qn.$$.fragment,e),xa=!1},d(e){e&&(s(x),s(M),s(_),s(o),s(T),s(At),s($e),s(St),s(Pe),s(Ot),s(Ce),s(Kt),s(Ze),s(ea),s(na),s(Ee),s(ta),s(S),s(aa),s(O),s(sa),s(oa),s(Ge),s(ia),s(ra),s(la),s(J),s(da),s(pa),s(Q),s(ca),s(ma),s(k),s(ga),s(ua),s(U),s(fa),s(ha),s(j),s(_a),s(wa),s($),s(ba),s(ya),s(G),s(Ia),s(va),s(D),s(Ma),s(Ta),s(zt)),s(f),u(v,e),u(We,e),u(Le),u(Ne,e),u(Be,e),u(Ve,e),u(qe),u(Re),u(K),u(Xe),u(Fe),u(He),u(Ye),u(De),u(ze,e),u(Ae),u(Se),u(se),u(Oe),u(Ke),u(en),u(nn),u(tn),u(an,e),u(sn),u(on),u(de),u(rn),u(ln),u(dn),u(pn),u(cn),u(mn,e),u(gn),u(un),u(ue),u(fn),u(hn),u(_n),u(wn),u(bn),u(yn,e),u(In),u(vn),u(be),u(Mn),u(Tn),u(xn),u(Jn),u(Qn),u(kn,e),u(Un),u(jn),u(Te),u($n),u(Pn),u(Cn),u(Zn),u(Wn),u(En,e),u(Ln),u(Nn),u(Ue),u(Gn),u(Bn,e),u(Vn),u(qn,e)}}}const Bi='{"title":"QwenImage","local":"qwenimage","sections":[{"title":"LoRA for faster inference","local":"lora-for-faster-inference","sections":[],"depth":2},{"title":"Multi-image reference with 
QwenImageEditPlusPipeline","local":"multi-image-reference-with-qwenimageeditpluspipeline","sections":[],"depth":2},{"title":"QwenImagePipeline","local":"diffusers.QwenImagePipeline","sections":[],"depth":2},{"title":"QwenImageImg2ImgPipeline","local":"diffusers.QwenImageImg2ImgPipeline","sections":[],"depth":2},{"title":"QwenImageInpaintPipeline","local":"diffusers.QwenImageInpaintPipeline","sections":[],"depth":2},{"title":"QwenImageEditPipeline","local":"diffusers.QwenImageEditPipeline","sections":[],"depth":2},{"title":"QwenImageEditInpaintPipeline","local":"diffusers.QwenImageEditInpaintPipeline","sections":[],"depth":2},{"title":"QwenImageControlNetPipeline","local":"diffusers.QwenImageControlNetPipeline","sections":[],"depth":2},{"title":"QwenImageEditPlusPipeline","local":"diffusers.QwenImageEditPlusPipeline","sections":[],"depth":2},{"title":"QwenImagePipelineOutput","local":"diffusers.pipelines.qwenimage.pipeline_output.QwenImagePipelineOutput","sections":[],"depth":2}],"depth":1}';function Vi(C){return Qi(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Di extends ki{constructor(f){super(),Ui(this,f,Vi,Gi,Ji,{})}}export{Di as component}; | |
Xet Storage Details
- Size:
- 201 kB
- Xet hash:
- 8dd4a8fc2a827692be93f90559386e32945700b5436ddd84bb01155b650ec888
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.