Buckets:
| import{s as Xi,o as Ei,n as E}from"../chunks/scheduler.8c3d61f6.js";import{S as Vi,i as Yi,g as i,s as o,r as c,A as zi,h as p,f as n,c as s,j as T,u as m,x as _,k as x,y as a,a as l,v as g,d as u,t as f,w as h,m as Qi,n as Ai}from"../chunks/index.da70eac4.js";import{T as Na}from"../chunks/Tip.1d9b8c37.js";import{D as v}from"../chunks/Docstring.d7448bb3.js";import{C as U}from"../chunks/CodeBlock.a9c4becf.js";import{E as ae}from"../chunks/ExampleCodeBlock.bdbc5937.js";import{H as j,E as Hi}from"../chunks/getInferenceSnippets.1d18021a.js";function Si(J){let d,w='Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out <a href="https://huggingface.co/blog/sd3#memory-optimizations-for-sd3" rel="nofollow">this section</a> for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to <a href="https://huggingface.co/blog/quanto-diffusers" rel="nofollow">this blog post</a> to learn more. For an exhaustive list of resources, check out <a href="https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c" rel="nofollow">this gist</a>.';return{c(){d=i("p"),d.innerHTML=w},l(y){d=p(y,"P",{"data-svelte-h":!0}),_(d)!=="svelte-1nl2vrj"&&(d.innerHTML=w)},m(y,b){l(y,d,b)},p:E,d(y){y&&n(d)}}}function qi(J){let d,w='Check out <a href="../../../using-diffusers/ip_adapter">IP-Adapter</a> to learn more about how IP-Adapters work.';return{c(){d=i("p"),d.innerHTML=w},l(y){d=p(y,"P",{"data-svelte-h":!0}),_(d)!=="svelte-1xq17cf"&&(d.innerHTML=w)},m(y,b){l(y,d,b)},p:E,d(y){y&&n(d)}}}function Di(J){let d,w="It is possible to mix block and leaf-level offloading for different components in a pipeline.";return{c(){d=i("p"),d.textContent=w},l(y){d=p(y,"P",{"data-svelte-h":!0}),_(d)!=="svelte-5ifmld"&&(d.textContent=w)},m(y,b){l(y,d,b)},p:E,d(y){y&&n(d)}}}function Oi(J){let d;return{c(){d=Qi("`FP8` inference can be brittle depending on the GPU type, CUDA version, and `torch` version that you are using. It is recommended that you use the `optimum-quanto` library in order to run FP8 inference on your machine.")},l(w){d=Ai(w,"`FP8` inference can be brittle depending on the GPU type, CUDA version, and `torch` version that you are using. It is recommended that you use the `optimum-quanto` library in order to run FP8 inference on your machine.")},m(w,y){l(w,d,y)},d(w){w&&n(d)}}}function Ki(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtc2NobmVsbCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS50byglMjJjdWRhJTIyKSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjBjYXQlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMGhlbGxvJTIwd29ybGQlMjIlMEElMjMlMjBEZXBlbmRpbmclMjBvbiUyMHRoZSUyMHZhcmlhbnQlMjBiZWluZyUyMHVzZWQlMkMlMjB0aGUlMjBwaXBlbGluZSUyMGNhbGwlMjB3aWxsJTIwc2xpZ2h0bHklMjB2YXJ5LiUwQSUyMyUyMFJlZmVyJTIwdG8lMjB0aGUlMjBwaXBlbGluZSUyMGRvY3VtZW50YXRpb24lMjBmb3IlMjBtb3JlJTIwZGV0YWlscy4lMEFpbWFnZSUyMCUzRCUyMHBpcGUocHJvbXB0JTJDJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDQlMkMlMjBndWlkYW5jZV9zY2FsZSUzRDAuMCkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyZmx1eC5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| <span class="hljs-meta">>>> </span>pipe = FluxPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Depending on the variant being used, the pipeline call will slightly vary.</span> | |
| <span class="hljs-meta">>>> </span><span class="hljs-comment"># Refer to the pipeline documentation for more details.</span> | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt, num_inference_steps=<span class="hljs-number">4</span>, guidance_scale=<span class="hljs-number">0.0</span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"flux.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function ep(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEElMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eEltZzJJbWdQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBZGV2aWNlJTIwJTNEJTIwJTIyY3VkYSUyMiUwQXBpcGUlMjAlM0QlMjBGbHV4SW1nMkltZ1BpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1zY2huZWxsJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlJTIwJTNEJTIwcGlwZS50byhkZXZpY2UpJTBBJTBBdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRnN0YWJsZS1kaWZmdXNpb24lMkZtYWluJTJGYXNzZXRzJTJGc3RhYmxlLXNhbXBsZXMlMkZpbWcyaW1nJTJGc2tldGNoLW1vdW50YWlucy1pbnB1dC5qcGclMjIlMEFpbml0X2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSh1cmwpLnJlc2l6ZSgoMTAyNCUyQyUyMDEwMjQpKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMmNhdCUyMHdpemFyZCUyQyUyMGdhbmRhbGYlMkMlMjBsb3JkJTIwb2YlMjB0aGUlMjByaW5ncyUyQyUyMGRldGFpbGVkJTJDJTIwZmFudGFzeSUyQyUyMGN1dGUlMkMlMjBhZG9yYWJsZSUyQyUyMFBpeGFyJTJDJTIwRGlzbmV5JTJDJTIwOGslMjIlMEElMEFpbWFnZXMlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEaW5pdF9pbWFnZSUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q0JTJDJTIwc3RyZW5ndGglM0QwLjk1JTJDJTIwZ3VpZGFuY2Vfc2NhbGUlM0QwLjAlMEEpLmltYWdlcyU1QjAlNUQ=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxImg2ImgPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>device = <span class="hljs-string">"cuda"</span> | |
| <span class="hljs-meta">>>> </span>pipe = FluxImg2ImgPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe = pipe.to(device) | |
| <span class="hljs-meta">>>> </span>url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| <span class="hljs-meta">>>> </span>init_image = load_image(url).resize((<span class="hljs-number">1024</span>, <span class="hljs-number">1024</span>)) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"</span> | |
| <span class="hljs-meta">>>> </span>images = pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, image=init_image, num_inference_steps=<span class="hljs-number">4</span>, strength=<span class="hljs-number">0.95</span>, guidance_scale=<span class="hljs-number">0.0</span> | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>]`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function tp(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eElucGFpbnRQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhJbnBhaW50UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLXNjaG5lbGwlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGUudG8oJTIyY3VkYSUyMiklMEFwcm9tcHQlMjAlM0QlMjAlMjJGYWNlJTIwb2YlMjBhJTIweWVsbG93JTIwY2F0JTJDJTIwaGlnaCUyMHJlc29sdXRpb24lMkMlMjBzaXR0aW5nJTIwb24lMjBhJTIwcGFyayUyMGJlbmNoJTIyJTBBaW1nX3VybCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZsYXRlbnQtZGlmZnVzaW9uJTJGbWFpbiUyRmRhdGElMkZpbnBhaW50aW5nX2V4YW1wbGVzJTJGb3ZlcnR1cmUtY3JlYXRpb25zLTVzSTZmUWdZSXVvLnBuZyUyMiUwQW1hc2tfdXJsJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW9fbWFzay5wbmclMjIlMEFzb3VyY2UlMjAlM0QlMjBsb2FkX2ltYWdlKGltZ191cmwpJTBBbWFzayUyMCUzRCUyMGxvYWRfaW1hZ2UobWFza191cmwpJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUzRHByb21wdCUyQyUyMGltYWdlJTNEc291cmNlJTJDJTIwbWFza19pbWFnZSUzRG1hc2spLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmZsdXhfaW5wYWludGluZy5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = FluxInpaintPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"Face of a yellow cat, high resolution, sitting on a park bench"</span> | |
| <span class="hljs-meta">>>> </span>img_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">>>> </span>mask_url = <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">>>> </span>source = load_image(img_url) | |
| <span class="hljs-meta">>>> </span>mask = load_image(mask_url) | |
| <span class="hljs-meta">>>> </span>image = pipe(prompt=prompt, image=source, mask_image=mask).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"flux_inpainting.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function np(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eENvbnRyb2xOZXRJbnBhaW50UGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLm1vZGVscyUyMGltcG9ydCUyMEZsdXhDb250cm9sTmV0TW9kZWwlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBGbHV4Q29udHJvbE5ldE1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJJbnN0YW50WCUyRkZMVVguMS1kZXYtY29udHJvbG5ldC1jYW5ueSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFwaXBlJTIwJTNEJTIwRmx1eENvbnRyb2xOZXRJbnBhaW50UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLXNjaG5lbGwlMjIlMkMlMjBjb250cm9sbmV0JTNEY29udHJvbG5ldCUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUwQSklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRkluc3RhbnRYJTJGRkxVWC4xLWRldi1Db250cm9sbmV0LUNhbm55LWFscGhhJTJGcmVzb2x2ZSUyRm1haW4lMkZjYW5ueS5qcGclMjIlMEEpJTBBaW5pdF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZyYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tJTJGQ29tcFZpcyUyRmxhdGVudC1kaWZmdXNpb24lMkZtYWluJTJGZGF0YSUyRmlucGFpbnRpbmdfZXhhbXBsZXMlMkZvdmVydHVyZS1jcmVhdGlvbnMtNXNJNmZRZ1lJdW8ucG5nJTIyJTBBKSUwQW1hc2tfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZsYXRlbnQtZGlmZnVzaW9uJTJGbWFpbiUyRmRhdGElMkZpbnBhaW50aW5nX2V4YW1wbGVzJTJGb3ZlcnR1cmUtY3JlYXRpb25zLTVzSTZmUWdZSXVvX21hc2sucG5nJTIyJTBBKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjBnaXJsJTIwaG9sZGluZyUyMGElMjBzaWduJTIwdGhhdCUyMHNheXMlMjBJbnN0YW50WCUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFza19pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb250cm9sX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbF9ndWlkYW5jZV9zdGFydCUzRDAuMiUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfZ3VpZGFuY2VfZW5kJTNEMC44JTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbG5ldF9jb25kaXRpb25pbmdfc2NhbGUlM0QwLjclMkMlMEElMjAlMjAlMjAlMjBzdHJlbmd0aCUzRDAuNyUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QyOCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMy41JTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJmbHV4X2NvbnRyb2xuZXRfaW5wYWludC5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlNetInpaintPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.models <span class="hljs-keyword">import</span> FluxControlNetModel | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>controlnet = FluxControlNetModel.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"InstantX/FLUX.1-dev-controlnet-canny"</span>, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe = FluxControlNetInpaintPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, controlnet=controlnet, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>control_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny-alpha/resolve/main/canny.jpg"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>init_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>mask_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A girl holding a sign that says InstantX"</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt, | |
| <span class="hljs-meta">... </span> image=init_image, | |
| <span class="hljs-meta">... </span> mask_image=mask_image, | |
| <span class="hljs-meta">... </span> control_image=control_image, | |
| <span class="hljs-meta">... </span> control_guidance_start=<span class="hljs-number">0.2</span>, | |
| <span class="hljs-meta">... </span> control_guidance_end=<span class="hljs-number">0.8</span>, | |
| <span class="hljs-meta">... </span> controlnet_conditioning_scale=<span class="hljs-number">0.7</span>, | |
| <span class="hljs-meta">... </span> strength=<span class="hljs-number">0.7</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">28</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">3.5</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"flux_controlnet_inpaint.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function op(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eENvbnRyb2xOZXRJbWcySW1nUGlwZWxpbmUlMkMlMjBGbHV4Q29udHJvbE5ldE1vZGVsJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFkZXZpY2UlMjAlM0QlMjAlMjJjdWRhJTIyJTIwaWYlMjB0b3JjaC5jdWRhLmlzX2F2YWlsYWJsZSgpJTIwZWxzZSUyMCUyMmNwdSUyMiUwQSUwQWNvbnRyb2xuZXQlMjAlM0QlMjBGbHV4Q29udHJvbE5ldE1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJJbnN0YW50WCUyRkZMVVguMS1kZXYtQ29udHJvbG5ldC1DYW5ueS1hbHBoYSUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhDb250cm9sTmV0SW1nMkltZ1BpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1zY2huZWxsJTIyJTJDJTIwY29udHJvbG5ldCUzRGNvbnRyb2xuZXQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMEEpJTBBJTBBcGlwZS50ZXh0X2VuY29kZXIudG8odG9yY2guZmxvYXQxNiklMEFwaXBlLmNvbnRyb2xuZXQudG8odG9yY2guZmxvYXQxNiklMEFwaXBlLnRvKCUyMmN1ZGElMjIpJTBBJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRkluc3RhbnRYJTJGU0QzLUNvbnRyb2xuZXQtQ2FubnklMkZyZXNvbHZlJTJGbWFpbiUyRmNhbm55LmpwZyUyMiklMEFpbml0X2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRnJhdy5naXRodWJ1c2VyY29udGVudC5jb20lMkZDb21wVmlzJTJGc3RhYmxlLWRpZmZ1c2lvbiUyRm1haW4lMkZhc3NldHMlMkZzdGFibGUtc2FtcGxlcyUyRmltZzJpbWclMkZza2V0Y2gtbW91bnRhaW5zLWlucHV0LmpwZyUyMiUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwZ2lybCUyMGluJTIwY2l0eSUyQyUyMDI1JTIweWVhcnMlMjBvbGQlMkMlMjBjb29sJTJDJTIwZnV0dXJpc3RpYyUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sX2ltYWdlJTNEY29udHJvbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfZ3VpZGFuY2Vfc3RhcnQlM0QwLjIlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sX2d1aWRhbmNlX2VuZCUzRDAuOCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xuZXRfY29uZGl0aW9uaW5nX3NjYWxlJTNEMS4wJTJDJTBBJTIwJTIwJTIwJTIwc3RyZW5ndGglM0QwLjclMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMiUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMy41JTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJmbHV4X2NvbnRyb2xuZXRfaW1nMmltZy5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlNetImg2ImgPipeline, FluxControlNetModel | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>device = <span class="hljs-string">"cuda"</span> <span class="hljs-keyword">if</span> torch.cuda.is_available() <span class="hljs-keyword">else</span> <span class="hljs-string">"cpu"</span> | |
| <span class="hljs-meta">>>> </span>controlnet = FluxControlNetModel.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"InstantX/FLUX.1-dev-Controlnet-Canny-alpha"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe = FluxControlNetImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, controlnet=controlnet, torch_dtype=torch.float16 | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe.text_encoder.to(torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe.controlnet.to(torch.float16) | |
| <span class="hljs-meta">>>> </span>pipe.to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>control_image = load_image(<span class="hljs-string">"https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg"</span>) | |
| <span class="hljs-meta">>>> </span>init_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A girl in city, 25 years old, cool, futuristic"</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt, | |
| <span class="hljs-meta">... </span> image=init_image, | |
| <span class="hljs-meta">... </span> control_image=control_image, | |
| <span class="hljs-meta">... </span> control_guidance_start=<span class="hljs-number">0.2</span>, | |
| <span class="hljs-meta">... </span> control_guidance_end=<span class="hljs-number">0.8</span>, | |
| <span class="hljs-meta">... </span> controlnet_conditioning_scale=<span class="hljs-number">1.0</span>, | |
| <span class="hljs-meta">... </span> strength=<span class="hljs-number">0.7</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">2</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">3.5</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"flux_controlnet_img2img.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function sp(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwY29udHJvbG5ldF9hdXglMjBpbXBvcnQlMjBDYW5ueURldGVjdG9yJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEZsdXhDb250cm9sUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1DYW5ueS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKS50byglMjJjdWRhJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjByb2JvdCUyMG1hZGUlMjBvZiUyMGV4b3RpYyUyMGNhbmRpZXMlMjBhbmQlMjBjaG9jb2xhdGVzJTIwb2YlMjBkaWZmZXJlbnQlMjBraW5kcy4lMjBUaGUlMjBiYWNrZ3JvdW5kJTIwaXMlMjBmaWxsZWQlMjB3aXRoJTIwY29uZmV0dGklMjBhbmQlMjBjZWxlYnJhdG9yeSUyMGdpZnRzLiUyMiUwQWNvbnRyb2xfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRnJvYm90LnBuZyUyMiUwQSklMEElMEFwcm9jZXNzb3IlMjAlM0QlMjBDYW5ueURldGVjdG9yKCklMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwcHJvY2Vzc29yKCUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlMkMlMjBsb3dfdGhyZXNob2xkJTNENTAlMkMlMjBoaWdoX3RocmVzaG9sZCUzRDIwMCUyQyUyMGRldGVjdF9yZXNvbHV0aW9uJTNEMTAyNCUyQyUyMGltYWdlX3Jlc29sdXRpb24lM0QxMDI0JTBBKSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sX2ltYWdlJTNEY29udHJvbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDMwLjAlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMm91dHB1dC5wbmclMjIp",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> controlnet_aux <span class="hljs-keyword">import</span> CannyDetector | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = FluxControlPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"black-forest-labs/FLUX.1-Canny-dev"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>).to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| <span class="hljs-meta">>>> </span>control_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>processor = CannyDetector() | |
| <span class="hljs-meta">>>> </span>control_image = processor( | |
| <span class="hljs-meta">... </span> control_image, low_threshold=<span class="hljs-number">50</span>, high_threshold=<span class="hljs-number">200</span>, detect_resolution=<span class="hljs-number">1024</span>, image_resolution=<span class="hljs-number">1024</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> control_image=control_image, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">1024</span>, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">1024</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">30.0</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function ap(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwY29udHJvbG5ldF9hdXglMjBpbXBvcnQlMjBDYW5ueURldGVjdG9yJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEZsdXhDb250cm9sSW1nMkltZ1BpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwRmx1eENvbnRyb2xJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLUNhbm55LWRldiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpLnRvKCUyMmN1ZGElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMHJvYm90JTIwbWFkZSUyMG9mJTIwZXhvdGljJTIwY2FuZGllcyUyMGFuZCUyMGNob2NvbGF0ZXMlMjBvZiUyMGRpZmZlcmVudCUyMGtpbmRzLiUyMEFic3RyYWN0JTIwYmFja2dyb3VuZCUyMiUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZ3YXRlcmNvbG9yLXBhaW50aW5nLmpwZyUyMiUwQSklMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZyb2JvdC5wbmclMjIlMEEpJTBBJTBBcHJvY2Vzc29yJTIwJTNEJTIwQ2FubnlEZXRlY3RvcigpJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMHByb2Nlc3NvciglMEElMjAlMjAlMjAlMjBjb250cm9sX2ltYWdlJTJDJTIwbG93X3RocmVzaG9sZCUzRDUwJTJDJTIwaGlnaF90aHJlc2hvbGQlM0QyMDAlMkMlMjBkZXRlY3RfcmVzb2x1dGlvbiUzRDEwMjQlMkMlMjBpbWFnZV9yZXNvbHV0aW9uJTNEMTAyNCUwQSklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwaW1hZ2UlM0RpbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb250cm9sX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwc3RyZW5ndGglM0QwLjglMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QzMC4wJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> controlnet_aux <span class="hljs-keyword">import</span> CannyDetector | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlImg2ImgPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = FluxControlImg2ImgPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"black-forest-labs/FLUX.1-Canny-dev"</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>).to(<span class="hljs-string">"cuda"</span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. Abstract background"</span> | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/watercolor-painting.jpg"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>control_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>processor = CannyDetector() | |
| <span class="hljs-meta">>>> </span>control_image = processor( | |
| <span class="hljs-meta">... </span> control_image, low_threshold=<span class="hljs-number">50</span>, high_threshold=<span class="hljs-number">200</span>, detect_resolution=<span class="hljs-number">1024</span>, image_resolution=<span class="hljs-number">1024</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt=prompt, | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> control_image=control_image, | |
| <span class="hljs-meta">... </span> strength=<span class="hljs-number">0.8</span>, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">1024</span>, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">1024</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">30.0</span>, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function lp(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFByaW9yUmVkdXhQaXBlbGluZSUyQyUyMEZsdXhQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBZGV2aWNlJTIwJTNEJTIwJTIyY3VkYSUyMiUwQWR0eXBlJTIwJTNEJTIwdG9yY2guYmZsb2F0MTYlMEElMEFyZXBvX3JlZHV4JTIwJTNEJTIwJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtUmVkdXgtZGV2JTIyJTBBcmVwb19iYXNlJTIwJTNEJTIwJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtZGV2JTIyJTBBcGlwZV9wcmlvcl9yZWR1eCUyMCUzRCUyMEZsdXhQcmlvclJlZHV4UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKHJlcG9fcmVkdXglMkMlMjB0b3JjaF9kdHlwZSUzRGR0eXBlKS50byhkZXZpY2UpJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwcmVwb19iYXNlJTJDJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUyMHRleHRfZW5jb2Rlcl8yJTNETm9uZSUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpLnRvKGRldmljZSklMEElMEFpbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTBBJTIwJTIwJTIwJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGWWlZaVh1JTJGdGVzdGluZy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRnN0eWxlX3ppZ2d5JTJGaW1nNS5wbmclMjIlMEEpJTBBcGlwZV9wcmlvcl9vdXRwdXQlMjAlM0QlMjBwaXBlX3ByaW9yX3JlZHV4KGltYWdlKSUwQWltYWdlcyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QyLjUlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCgwKSUyQyUwQSUyMCUyMCUyMCUyMCoqcGlwZV9wcmlvcl9vdXRwdXQlMkMlMEEpLmltYWdlcyUwQWltYWdlcyU1QjAlNUQuc2F2ZSglMjJmbHV4LXJlZHV4LnBuZyUyMik=",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPriorReduxPipeline, FluxPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>device = <span class="hljs-string">"cuda"</span> | |
| <span class="hljs-meta">>>> </span>dtype = torch.bfloat16 | |
| <span class="hljs-meta">>>> </span>repo_redux = <span class="hljs-string">"black-forest-labs/FLUX.1-Redux-dev"</span> | |
| <span class="hljs-meta">>>> </span>repo_base = <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span> | |
| <span class="hljs-meta">>>> </span>pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(repo_redux, torch_dtype=dtype).to(device) | |
| <span class="hljs-meta">>>> </span>pipe = FluxPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> repo_base, text_encoder=<span class="hljs-literal">None</span>, text_encoder_2=<span class="hljs-literal">None</span>, torch_dtype=torch.bfloat16 | |
| <span class="hljs-meta">... </span>).to(device) | |
| <span class="hljs-meta">>>> </span>image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>pipe_prior_output = pipe_prior_redux(image) | |
| <span class="hljs-meta">>>> </span>images = pipe( | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">2.5</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span> generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>), | |
| <span class="hljs-meta">... </span> **pipe_prior_output, | |
| <span class="hljs-meta">... </span>).images | |
| <span class="hljs-meta">>>> </span>images[<span class="hljs-number">0</span>].save(<span class="hljs-string">"flux-redux.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function rp(J){let d,w="Examples:",y,b,M;return b=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eEZpbGxQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRllpWWlYdSUyRnRlc3RpbmctaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZjdXAucG5nJTIyKSUwQW1hc2slMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRllpWWlYdSUyRnRlc3RpbmctaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZjdXBfbWFzay5wbmclMjIpJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhGaWxsUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLUZpbGwtZGV2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTIwJTIwJTIzJTIwc2F2ZSUyMHNvbWUlMjBWUkFNJTIwYnklMjBvZmZsb2FkaW5nJTIwdGhlJTIwbW9kZWwlMjB0byUyMENQVSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJhJTIwd2hpdGUlMjBwYXBlciUyMGN1cCUyMiUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFzayUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDE2MzIlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEyMzIlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDUwJTJDJTBBJTIwJTIwJTIwJTIwbWF4X3NlcXVlbmNlX2xlbmd0aCUzRDUxMiUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvciglMjJjcHUlMjIpLm1hbnVhbF9zZWVkKDApJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJmbHV4X2ZpbGwucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxFillPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup.png"</span>) | |
| <span class="hljs-meta">>>> </span>mask = load_image(<span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup_mask.png"</span>) | |
| <span class="hljs-meta">>>> </span>pipe = FluxFillPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-Fill-dev"</span>, torch_dtype=torch.bfloat16) | |
| <span class="hljs-meta">>>> </span>pipe.enable_model_cpu_offload() <span class="hljs-comment"># save some VRAM by offloading the model to CPU</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt=<span class="hljs-string">"a white paper cup"</span>, | |
| <span class="hljs-meta">... </span> image=image, | |
| <span class="hljs-meta">... </span> mask_image=mask, | |
| <span class="hljs-meta">... </span> height=<span class="hljs-number">1632</span>, | |
| <span class="hljs-meta">... </span> width=<span class="hljs-number">1232</span>, | |
| <span class="hljs-meta">... </span> guidance_scale=<span class="hljs-number">30</span>, | |
| <span class="hljs-meta">... </span> num_inference_steps=<span class="hljs-number">50</span>, | |
| <span class="hljs-meta">... </span> max_sequence_length=<span class="hljs-number">512</span>, | |
| <span class="hljs-meta">... </span> generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>), | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"flux_fill.png"</span>)`,wrap:!1}}),{c(){d=i("p"),d.textContent=w,y=o(),c(b.$$.fragment)},l(r){d=p(r,"P",{"data-svelte-h":!0}),_(d)!=="svelte-kvfsh7"&&(d.textContent=w),y=s(r),m(b.$$.fragment,r)},m(r,I){l(r,d,I),l(r,y,I),g(b,r,I),M=!0},p:E,i(r){M||(u(b.$$.fragment,r),M=!0)},o(r){f(b.$$.fragment,r),M=!1},d(r){r&&(n(d),n(y)),h(b,r)}}}function ip(J){let d,w,y,b,M,r,I,fr='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white%22"/>',es,Ne,hr='Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">blog post</a> by the creators of Flux, Black Forest Labs.',ts,Pe,_r='Original model checkpoints for Flux can be found <a href="https://huggingface.co/black-forest-labs" rel="nofollow">here</a>. Original inference code can be found <a href="https://github.com/black-forest-labs/flux" rel="nofollow">here</a>.',ns,le,os,$e,br="Flux comes in the following variants:",ss,Le,yr='<thead><tr><th align="center">model type</th> <th align="center">model id</th></tr></thead> <tbody><tr><td align="center">Timestep-distilled</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-schnell" rel="nofollow"><code>black-forest-labs/FLUX.1-schnell</code></a></td></tr> <tr><td align="center">Guidance-distilled</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-dev" rel="nofollow"><code>black-forest-labs/FLUX.1-dev</code></a></td></tr> <tr><td align="center">Fill Inpainting/Outpainting (Guidance-distilled)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev" rel="nofollow"><code>black-forest-labs/FLUX.1-Fill-dev</code></a></td></tr> <tr><td align="center">Canny Control (Guidance-distilled)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev" rel="nofollow"><code>black-forest-labs/FLUX.1-Canny-dev</code></a></td></tr> <tr><td align="center">Depth Control (Guidance-distilled)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev" rel="nofollow"><code>black-forest-labs/FLUX.1-Depth-dev</code></a></td></tr> <tr><td align="center">Canny Control (LoRA)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev-lora" rel="nofollow"><code>black-forest-labs/FLUX.1-Canny-dev-lora</code></a></td></tr> <tr><td align="center">Depth Control (LoRA)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev-lora" rel="nofollow"><code>black-forest-labs/FLUX.1-Depth-dev-lora</code></a></td></tr> <tr><td align="center">Redux (Adapter)</td> <td align="center"><a href="https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev" rel="nofollow"><code>black-forest-labs/FLUX.1-Redux-dev</code></a></td></tr></tbody>',as,Re,Mr="All checkpoints have different usage which we detail below.",ls,Xe,rs,Ee,xr="<li><code>max_sequence_length</code> cannot be more than 256.</li> <li><code>guidance_scale</code> needs to be 0.</li> <li>As this is a timestep-distilled model, it benefits from fewer sampling steps.</li>",is,Ve,ps,Ye,ds,ze,wr="<li>The guidance-distilled variant takes about 50 sampling steps for good-quality generation.</li> <li>It doesn’t have any limitations around the <code>max_sequence_length</code>.</li>",cs,Qe,ms,Ae,gs,He,Tr="<li>Flux Fill pipeline does not require <code>strength</code> as an input like regular inpainting pipelines.</li> <li>It supports both inpainting and outpainting.</li>",us,Se,fs,qe,hs,De,Ir='<strong>Note:</strong> <code>black-forest-labs/Flux.1-Canny-dev</code> is <em>not</em> a <a href="/docs/diffusers/pr_11739/en/api/models/controlnet#diffusers.ControlNetModel">ControlNetModel</a> model. ControlNet models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Canny Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible.',_s,Oe,bs,Ke,vr="Canny Control is also possible with a LoRA variant of this condition. The usage is as follows:",ys,et,Ms,tt,xs,nt,Jr='<strong>Note:</strong> <code>black-forest-labs/Flux.1-Depth-dev</code> is <em>not</em> a ControlNet model. <a href="/docs/diffusers/pr_11739/en/api/models/controlnet#diffusers.ControlNetModel">ControlNetModel</a> models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Depth Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible.',ws,ot,Ts,st,jr="Depth Control is also possible with a LoRA variant of this condition. The usage is as follows:",Is,at,vs,lt,Js,rt,Ur="<li>Flux Redux pipeline is an adapter for FLUX.1 base models. It can be used with both flux-dev and flux-schnell, for image-to-image generation.</li> <li>You can first use the <code>FluxPriorReduxPipeline</code> to get the <code>prompt_embeds</code> and <code>pooled_prompt_embeds</code>, and then feed them into the <code>FluxPipeline</code> for image-to-image generation.</li> <li>When use <code>FluxPriorReduxPipeline</code> with a base pipeline, you can set <code>text_encoder=None</code> and <code>text_encoder_2=None</code> in the base pipeline, in order to save VRAM.</li>",js,it,Us,pt,Fs,dt,Fr='We can combine Flux Turbo LoRAs with Flux Control and other pipelines like Fill and Redux to enable few-steps’ inference. The example below shows how to do that for Flux Control LoRA for depth and turbo LoRA from <a href="https://hf.co/ByteDance/Hyper-SD" rel="nofollow"><code>ByteDance/Hyper-SD</code></a>.',Zs,ct,ks,mt,Cs,gt,Zr='When unloading the Control LoRA weights, call <code>pipe.unload_lora_weights(reset_to_overwritten_params=True)</code> to reset the <code>pipe.transformer</code> completely back to its original form. The resultant pipeline can then be used with methods like <a href="/docs/diffusers/pr_11739/en/api/pipelines/overview#diffusers.DiffusionPipeline.from_pipe">DiffusionPipeline.from_pipe()</a>. More details about this argument are available in <a href="https://github.com/huggingface/diffusers/pull/10397" rel="nofollow">this PR</a>.',Ws,ut,Gs,re,Bs,ft,kr="An IP-Adapter lets you prompt Flux with images, in addition to the text prompt. This is especially useful when describing complex concepts that are difficult to articulate through text alone and you have reference images.",Ns,ht,Ps,ie,Cr='<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_output.jpg"/> <figcaption class="mt-2 text-sm text-center text-gray-500">IP-Adapter examples with prompt "wearing sunglasses"</figcaption>',$s,_t,Ls,bt,Wr="Flux is a very large model and requires ~50GB of RAM/VRAM to load all the modeling components. Enable some of the optimizations below to lower the memory requirements.",Rs,yt,Xs,Mt,Gr='<a href="../../optimization/memory#group-offloading">Group offloading</a> lowers VRAM usage by offloading groups of internal layers rather than the whole model or weights. You need to use <a href="/docs/diffusers/pr_11739/en/api/utilities#diffusers.hooks.apply_group_offloading">apply_group_offloading()</a> on all the model components of a pipeline. The <code>offload_type</code> parameter allows you to toggle between block and leaf-level offloading. Setting it to <code>leaf_level</code> offloads the lowest leaf-level parameters to the CPU instead of offloading at the module-level.',Es,xt,Br="On CUDA devices that support asynchronous data streaming, set <code>use_stream=True</code> to overlap data transfer and computation to accelerate inference.",Vs,pe,Ys,wt,zs,Tt,Qs,It,Nr='Flux can generate high-quality images with FP16 (i.e. to accelerate inference on Turing/Volta GPUs) but produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing text encoders to run with FP32 inference thus removes this output difference. See <a href="https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516" rel="nofollow">here</a> for details.',As,vt,Pr="FP16 inference code:",Hs,Jt,Ss,jt,qs,Ut,$r="Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.",Ds,Ft,Lr='Refer to the <a href="../../quantization/overview">Quantization</a> overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized <a href="/docs/diffusers/pr_11739/en/api/pipelines/flux#diffusers.FluxPipeline">FluxPipeline</a> for inference with bitsandbytes.',Os,Zt,Ks,kt,ea,Ct,Rr="The <code>FluxTransformer2DModel</code> supports loading checkpoints in the original format shipped by Black Forest Labs. This is also useful when trying to load finetunes or quantized versions of the models that have been published by the community.",ta,de,na,Wt,Xr="The following example demonstrates how to run Flux with less than 16GB of VRAM.",oa,Gt,Er="First install <code>optimum-quanto</code>",sa,Bt,aa,Nt,Vr="Then run the following example",la,Pt,ra,$t,ia,F,Lt,Pa,Qn,Yr="The Flux pipeline for text-to-image generation.",$a,An,zr='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',La,V,Rt,Ra,Hn,Qr="Function invoked when calling the pipeline for generation.",Xa,ce,Ea,me,Xt,Va,Sn,Ar=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Ya,ge,Et,za,qn,Hr=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Qa,ue,Vt,Aa,Dn,Sr=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Ha,fe,Yt,Sa,On,qr=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,qa,Kn,zt,pa,Qt,da,Z,At,Da,eo,Dr="The Flux pipeline for image inpainting.",Oa,to,Or='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',Ka,Y,Ht,el,no,Kr="Function invoked when calling the pipeline for generation.",tl,he,nl,_e,St,ol,oo,ei=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,sl,be,qt,al,so,ti=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ll,ye,Dt,rl,ao,ni=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,il,Me,Ot,pl,lo,oi=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,dl,ro,Kt,ca,en,ma,P,tn,cl,io,si="The Flux pipeline for image inpainting.",ml,po,ai='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',gl,z,nn,ul,co,li="Function invoked when calling the pipeline for generation.",fl,xe,hl,mo,on,ga,sn,ua,$,an,_l,go,ri="The Flux controlnet pipeline for inpainting.",bl,uo,ii='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',yl,Q,ln,Ml,fo,pi="Function invoked when calling the pipeline for generation.",xl,we,wl,ho,rn,fa,pn,ha,L,dn,Tl,_o,di="The Flux controlnet pipeline for image-to-image generation.",Il,bo,ci='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',vl,A,cn,Jl,yo,mi="Function invoked when calling the pipeline for generation.",jl,Te,Ul,Mo,mn,_a,gn,ba,k,un,Fl,xo,gi="The Flux pipeline for controllable text-to-image generation.",Zl,wo,ui='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',kl,H,fn,Cl,To,fi="Function invoked when calling the pipeline for generation.",Wl,Ie,Gl,ve,hn,Bl,Io,hi=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Nl,Je,_n,Pl,vo,_i=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,$l,je,bn,Ll,Jo,bi=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Rl,Ue,yn,Xl,jo,yi=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,El,Uo,Mn,ya,xn,Ma,R,wn,Vl,Fo,Mi="The Flux pipeline for image inpainting.",Yl,Zo,xi='Reference: <a href="https://blackforestlabs.ai/announcing-black-forest-labs/" rel="nofollow">https://blackforestlabs.ai/announcing-black-forest-labs/</a>',zl,S,Tn,Ql,ko,wi="Function invoked when calling the pipeline for generation.",Al,Fe,Hl,Co,In,xa,vn,wa,X,Jn,Sl,Wo,Ti="The Flux Redux pipeline for image-to-image generation.",ql,Go,Ii='Reference: <a href="https://blackforestlabs.ai/flux-1-tools/" rel="nofollow">https://blackforestlabs.ai/flux-1-tools/</a>',Dl,q,jn,Ol,Bo,vi="Function invoked when calling the pipeline for generation.",Kl,Ze,er,No,Un,Ta,Fn,Ia,C,Zn,tr,Po,Ji="The Flux Fill pipeline for image inpainting/outpainting.",nr,$o,ji='Reference: <a href="https://blackforestlabs.ai/flux-1-tools/" rel="nofollow">https://blackforestlabs.ai/flux-1-tools/</a>',or,D,kn,sr,Lo,Ui="Function invoked when calling the pipeline for generation.",ar,ke,lr,Ce,Cn,rr,Ro,Fi=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,ir,We,Wn,pr,Xo,Zi=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,dr,Ge,Gn,cr,Eo,ki=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,mr,Be,Bn,gr,Vo,Ci=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,ur,Yo,Nn,va,Pn,Ja,Oo,ja;return M=new j({props:{title:"Flux",local:"flux",headingTag:"h1"}}),le=new Na({props:{$$slots:{default:[Si]},$$scope:{ctx:J}}}),Xe=new j({props:{title:"Timestep-distilled",local:"timestep-distilled",headingTag:"h3"}}),Ve=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtc2NobmVsbCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjBjYXQlMjBob2xkaW5nJTIwYSUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMGhlbGxvJTIwd29ybGQlMjIlMEFvdXQlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMC4lMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Q3NjglMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEzNjAlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENCUyQyUwQSUyMCUyMCUyMCUyMG1heF9zZXF1ZW5jZV9sZW5ndGglM0QyNTYlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFvdXQuc2F2ZSglMjJpbWFnZS5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| pipe = FluxPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, torch_dtype=torch.bfloat16) | |
| pipe.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| out = pipe( | |
| prompt=prompt, | |
| guidance_scale=<span class="hljs-number">0.</span>, | |
| height=<span class="hljs-number">768</span>, | |
| width=<span class="hljs-number">1360</span>, | |
| num_inference_steps=<span class="hljs-number">4</span>, | |
| max_sequence_length=<span class="hljs-number">256</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| out.save(<span class="hljs-string">"image.png"</span>)`,wrap:!1}}),Ye=new j({props:{title:"Guidance-distilled",local:"guidance-distilled",headingTag:"h3"}}),Qe=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtZGV2JTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyYSUyMHRpbnklMjBhc3Ryb25hdXQlMjBoYXRjaGluZyUyMGZyb20lMjBhbiUyMGVnZyUyMG9uJTIwdGhlJTIwbW9vbiUyMiUwQW91dCUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QzLjUlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0Q3NjglMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEzNjAlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFvdXQuc2F2ZSglMjJpbWFnZS5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| pipe = FluxPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, torch_dtype=torch.bfloat16) | |
| pipe.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"a tiny astronaut hatching from an egg on the moon"</span> | |
| out = pipe( | |
| prompt=prompt, | |
| guidance_scale=<span class="hljs-number">3.5</span>, | |
| height=<span class="hljs-number">768</span>, | |
| width=<span class="hljs-number">1360</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| out.save(<span class="hljs-string">"image.png"</span>)`,wrap:!1}}),Ae=new j({props:{title:"Fill Inpainting/Outpainting",local:"fill-inpaintingoutpainting",headingTag:"h3"}}),Se=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eEZpbGxQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRllpWWlYdSUyRnRlc3RpbmctaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZjdXAucG5nJTIyKSUwQW1hc2slMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRllpWWlYdSUyRnRlc3RpbmctaW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZjdXBfbWFzay5wbmclMjIpJTBBJTBBcmVwb19pZCUyMCUzRCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLUZpbGwtZGV2JTIyJTBBcGlwZSUyMCUzRCUyMEZsdXhGaWxsUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKHJlcG9faWQlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KS50byglMjJjdWRhJTIyKSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0QlMjJhJTIwd2hpdGUlMjBwYXBlciUyMGN1cCUyMiUyQyUwQSUyMCUyMCUyMCUyMGltYWdlJTNEaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBtYXNrX2ltYWdlJTNEbWFzayUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDE2MzIlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEyMzIlMkMlMEElMjAlMjAlMjAlMjBtYXhfc2VxdWVuY2VfbGVuZ3RoJTNENTEyJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCUyMmNwdSUyMikubWFudWFsX3NlZWQoMCklMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKGYlMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxFillPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup.png"</span>) | |
| mask = load_image(<span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup_mask.png"</span>) | |
| repo_id = <span class="hljs-string">"black-forest-labs/FLUX.1-Fill-dev"</span> | |
| pipe = FluxFillPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16).to(<span class="hljs-string">"cuda"</span>) | |
| image = pipe( | |
| prompt=<span class="hljs-string">"a white paper cup"</span>, | |
| image=image, | |
| mask_image=mask, | |
| height=<span class="hljs-number">1632</span>, | |
| width=<span class="hljs-number">1232</span>, | |
| max_sequence_length=<span class="hljs-number">512</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>) | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">f"output.png"</span>)`,wrap:!1}}),qe=new j({props:{title:"Canny Control",local:"canny-control",headingTag:"h3"}}),Oe=new U({props:{code:"JTIzJTIwIXBpcCUyMGluc3RhbGwlMjAtVSUyMGNvbnRyb2xuZXQtYXV4JTBBaW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwY29udHJvbG5ldF9hdXglMjBpbXBvcnQlMjBDYW5ueURldGVjdG9yJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEZsdXhDb250cm9sUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1DYW5ueS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KS50byglMjJjdWRhJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjByb2JvdCUyMG1hZGUlMjBvZiUyMGV4b3RpYyUyMGNhbmRpZXMlMjBhbmQlMjBjaG9jb2xhdGVzJTIwb2YlMjBkaWZmZXJlbnQlMjBraW5kcy4lMjBUaGUlMjBiYWNrZ3JvdW5kJTIwaXMlMjBmaWxsZWQlMjB3aXRoJTIwY29uZmV0dGklMjBhbmQlMjBjZWxlYnJhdG9yeSUyMGdpZnRzLiUyMiUwQWNvbnRyb2xfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRnJvYm90LnBuZyUyMiklMEElMEFwcm9jZXNzb3IlMjAlM0QlMjBDYW5ueURldGVjdG9yKCklMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwcHJvY2Vzc29yKGNvbnRyb2xfaW1hZ2UlMkMlMjBsb3dfdGhyZXNob2xkJTNENTAlMkMlMjBoaWdoX3RocmVzaG9sZCUzRDIwMCUyQyUyMGRldGVjdF9yZXNvbHV0aW9uJTNEMTAyNCUyQyUyMGltYWdlX3Jlc29sdXRpb24lM0QxMDI0KSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBjb250cm9sX2ltYWdlJTNEY29udHJvbF9pbWFnZSUyQyUwQSUyMCUyMCUyMCUyMGhlaWdodCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDMwLjAlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMm91dHB1dC5wbmclMjIp",highlighted:`<span class="hljs-comment"># !pip install -U controlnet-aux</span> | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> controlnet_aux <span class="hljs-keyword">import</span> CannyDetector | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| pipe = FluxControlPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-Canny-dev"</span>, torch_dtype=torch.bfloat16).to(<span class="hljs-string">"cuda"</span>) | |
| prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| control_image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span>) | |
| processor = CannyDetector() | |
| control_image = processor(control_image, low_threshold=<span class="hljs-number">50</span>, high_threshold=<span class="hljs-number">200</span>, detect_resolution=<span class="hljs-number">1024</span>, image_resolution=<span class="hljs-number">1024</span>) | |
| image = pipe( | |
| prompt=prompt, | |
| control_image=control_image, | |
| height=<span class="hljs-number">1024</span>, | |
| width=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| guidance_scale=<span class="hljs-number">30.0</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),et=new U({props:{code:"JTIzJTIwIXBpcCUyMGluc3RhbGwlMjAtVSUyMGNvbnRyb2xuZXQtYXV4JTBBaW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwY29udHJvbG5ldF9hdXglMjBpbXBvcnQlMjBDYW5ueURldGVjdG9yJTBBZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEZsdXhDb250cm9sUGlwZWxpbmUlMEFmcm9tJTIwZGlmZnVzZXJzLnV0aWxzJTIwaW1wb3J0JTIwbG9hZF9pbWFnZSUwQSUwQXBpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KS50byglMjJjdWRhJTIyKSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtQ2FubnktZGV2LWxvcmElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMHJvYm90JTIwbWFkZSUyMG9mJTIwZXhvdGljJTIwY2FuZGllcyUyMGFuZCUyMGNob2NvbGF0ZXMlMjBvZiUyMGRpZmZlcmVudCUyMGtpbmRzLiUyMFRoZSUyMGJhY2tncm91bmQlMjBpcyUyMGZpbGxlZCUyMHdpdGglMjBjb25mZXR0aSUyMGFuZCUyMGNlbGVicmF0b3J5JTIwZ2lmdHMuJTIyJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGcm9ib3QucG5nJTIyKSUwQSUwQXByb2Nlc3NvciUyMCUzRCUyMENhbm55RGV0ZWN0b3IoKSUwQWNvbnRyb2xfaW1hZ2UlMjAlM0QlMjBwcm9jZXNzb3IoY29udHJvbF9pbWFnZSUyQyUyMGxvd190aHJlc2hvbGQlM0Q1MCUyQyUyMGhpZ2hfdGhyZXNob2xkJTNEMjAwJTJDJTIwZGV0ZWN0X3Jlc29sdXRpb24lM0QxMDI0JTJDJTIwaW1hZ2VfcmVzb2x1dGlvbiUzRDEwMjQpJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb250cm9sX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMzAuMCUyQyUwQSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyb3V0cHV0LnBuZyUyMik=",highlighted:`<span class="hljs-comment"># !pip install -U controlnet-aux</span> | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> controlnet_aux <span class="hljs-keyword">import</span> CannyDetector | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| pipe = FluxControlPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, torch_dtype=torch.bfloat16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_lora_weights(<span class="hljs-string">"black-forest-labs/FLUX.1-Canny-dev-lora"</span>) | |
| prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| control_image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span>) | |
| processor = CannyDetector() | |
| control_image = processor(control_image, low_threshold=<span class="hljs-number">50</span>, high_threshold=<span class="hljs-number">200</span>, detect_resolution=<span class="hljs-number">1024</span>, image_resolution=<span class="hljs-number">1024</span>) | |
| image = pipe( | |
| prompt=prompt, | |
| control_image=control_image, | |
| height=<span class="hljs-number">1024</span>, | |
| width=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| guidance_scale=<span class="hljs-number">30.0</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),tt=new j({props:{title:"Depth Control",local:"depth-control",headingTag:"h3"}}),ot=new U({props:{code:"JTIzJTIwIXBpcCUyMGluc3RhbGwlMjBnaXQlMkJodHRwcyUzQSUyRiUyRmdpdGh1Yi5jb20lMkZodWdnaW5nZmFjZSUyRmltYWdlX2dlbl9hdXglMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBGbHV4Q29udHJvbFBpcGVsaW5lJTJDJTIwRmx1eFRyYW5zZm9ybWVyMkRNb2RlbCUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBZnJvbSUyMGltYWdlX2dlbl9hdXglMjBpbXBvcnQlMjBEZXB0aFByZXByb2Nlc3NvciUwQSUwQXBpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1EZXB0aC1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KS50byglMjJjdWRhJTIyKSUwQSUwQXByb21wdCUyMCUzRCUyMCUyMkElMjByb2JvdCUyMG1hZGUlMjBvZiUyMGV4b3RpYyUyMGNhbmRpZXMlMjBhbmQlMjBjaG9jb2xhdGVzJTIwb2YlMjBkaWZmZXJlbnQlMjBraW5kcy4lMjBUaGUlMjBiYWNrZ3JvdW5kJTIwaXMlMjBmaWxsZWQlMjB3aXRoJTIwY29uZmV0dGklMjBhbmQlMjBjZWxlYnJhdG9yeSUyMGdpZnRzLiUyMiUwQWNvbnRyb2xfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZkYXRhc2V0cyUyRmh1Z2dpbmdmYWNlJTJGZG9jdW1lbnRhdGlvbi1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRnJvYm90LnBuZyUyMiklMEElMEFwcm9jZXNzb3IlMjAlM0QlMjBEZXB0aFByZXByb2Nlc3Nvci5mcm9tX3ByZXRyYWluZWQoJTIyTGloZVlvdW5nJTJGZGVwdGgtYW55dGhpbmctbGFyZ2UtaGYlMjIpJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMHByb2Nlc3Nvcihjb250cm9sX2ltYWdlKSU1QjAlNUQuY29udmVydCglMjJSR0IlMjIpJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb250cm9sX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0QzMCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMTAuMCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDQyKSUyQyUwQSkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyb3V0cHV0LnBuZyUyMik=",highlighted:`<span class="hljs-comment"># !pip install git+https://github.com/huggingface/image_gen_aux</span> | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline, FluxTransformer2DModel | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-keyword">from</span> image_gen_aux <span class="hljs-keyword">import</span> DepthPreprocessor | |
| pipe = FluxControlPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-Depth-dev"</span>, torch_dtype=torch.bfloat16).to(<span class="hljs-string">"cuda"</span>) | |
| prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| control_image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span>) | |
| processor = DepthPreprocessor.from_pretrained(<span class="hljs-string">"LiheYoung/depth-anything-large-hf"</span>) | |
| control_image = processor(control_image)[<span class="hljs-number">0</span>].convert(<span class="hljs-string">"RGB"</span>) | |
| image = pipe( | |
| prompt=prompt, | |
| control_image=control_image, | |
| height=<span class="hljs-number">1024</span>, | |
| width=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| guidance_scale=<span class="hljs-number">10.0</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">42</span>), | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),at=new U({props:{code:"JTIzJTIwIXBpcCUyMGluc3RhbGwlMjBnaXQlMkJodHRwcyUzQSUyRiUyRmdpdGh1Yi5jb20lMkZodWdnaW5nZmFjZSUyRmltYWdlX2dlbl9hdXglMEFpbXBvcnQlMjB0b3JjaCUwQWZyb20lMjBkaWZmdXNlcnMlMjBpbXBvcnQlMjBGbHV4Q29udHJvbFBpcGVsaW5lJTJDJTIwRmx1eFRyYW5zZm9ybWVyMkRNb2RlbCUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBZnJvbSUyMGltYWdlX2dlbl9hdXglMjBpbXBvcnQlMjBEZXB0aFByZXByb2Nlc3NvciUwQSUwQXBpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KS50byglMjJjdWRhJTIyKSUwQXBpcGUubG9hZF9sb3JhX3dlaWdodHMoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtRGVwdGgtZGV2LWxvcmElMjIpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTIyQSUyMHJvYm90JTIwbWFkZSUyMG9mJTIwZXhvdGljJTIwY2FuZGllcyUyMGFuZCUyMGNob2NvbGF0ZXMlMjBvZiUyMGRpZmZlcmVudCUyMGtpbmRzLiUyMFRoZSUyMGJhY2tncm91bmQlMjBpcyUyMGZpbGxlZCUyMHdpdGglMjBjb25mZXR0aSUyMGFuZCUyMGNlbGVicmF0b3J5JTIwZ2lmdHMuJTIyJTBBY29udHJvbF9pbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGaHVnZ2luZ2ZhY2UlMkZkb2N1bWVudGF0aW9uLWltYWdlcyUyRnJlc29sdmUlMkZtYWluJTJGcm9ib3QucG5nJTIyKSUwQSUwQXByb2Nlc3NvciUyMCUzRCUyMERlcHRoUHJlcHJvY2Vzc29yLmZyb21fcHJldHJhaW5lZCglMjJMaWhlWW91bmclMkZkZXB0aC1hbnl0aGluZy1sYXJnZS1oZiUyMiklMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwcHJvY2Vzc29yKGNvbnRyb2xfaW1hZ2UpJTVCMCU1RC5jb252ZXJ0KCUyMlJHQiUyMiklMEElMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwY29udHJvbF9pbWFnZSUzRGNvbnRyb2xfaW1hZ2UlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QxMC4wJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoNDIpJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-comment"># !pip install git+https://github.com/huggingface/image_gen_aux</span> | |
| <span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline, FluxTransformer2DModel | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-keyword">from</span> image_gen_aux <span class="hljs-keyword">import</span> DepthPreprocessor | |
| pipe = FluxControlPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, torch_dtype=torch.bfloat16).to(<span class="hljs-string">"cuda"</span>) | |
| pipe.load_lora_weights(<span class="hljs-string">"black-forest-labs/FLUX.1-Depth-dev-lora"</span>) | |
| prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| control_image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span>) | |
| processor = DepthPreprocessor.from_pretrained(<span class="hljs-string">"LiheYoung/depth-anything-large-hf"</span>) | |
| control_image = processor(control_image)[<span class="hljs-number">0</span>].convert(<span class="hljs-string">"RGB"</span>) | |
| image = pipe( | |
| prompt=prompt, | |
| control_image=control_image, | |
| height=<span class="hljs-number">1024</span>, | |
| width=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| guidance_scale=<span class="hljs-number">10.0</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">42</span>), | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),lt=new j({props:{title:"Redux",local:"redux",headingTag:"h3"}}),it=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFByaW9yUmVkdXhQaXBlbGluZSUyQyUyMEZsdXhQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBZGV2aWNlJTIwJTNEJTIwJTIyY3VkYSUyMiUwQWR0eXBlJTIwJTNEJTIwdG9yY2guYmZsb2F0MTYlMEElMEElMEFyZXBvX3JlZHV4JTIwJTNEJTIwJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtUmVkdXgtZGV2JTIyJTBBcmVwb19iYXNlJTIwJTNEJTIwJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtZGV2JTIyJTIwJTBBcGlwZV9wcmlvcl9yZWR1eCUyMCUzRCUyMEZsdXhQcmlvclJlZHV4UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKHJlcG9fcmVkdXglMkMlMjB0b3JjaF9kdHlwZSUzRGR0eXBlKS50byhkZXZpY2UpJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwcmVwb19iYXNlJTJDJTIwJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHRleHRfZW5jb2Rlcl8yJTNETm9uZSUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMEEpLnRvKGRldmljZSklMEElMEFpbWFnZSUyMCUzRCUyMGxvYWRfaW1hZ2UoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmRhdGFzZXRzJTJGWWlZaVh1JTJGdGVzdGluZy1pbWFnZXMlMkZyZXNvbHZlJTJGbWFpbiUyRnN0eWxlX3ppZ2d5JTJGaW1nNS5wbmclMjIpJTBBcGlwZV9wcmlvcl9vdXRwdXQlMjAlM0QlMjBwaXBlX3ByaW9yX3JlZHV4KGltYWdlKSUwQWltYWdlcyUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QyLjUlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENTAlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCgwKSUyQyUwQSUyMCUyMCUyMCUyMCoqcGlwZV9wcmlvcl9vdXRwdXQlMkMlMEEpLmltYWdlcyUwQWltYWdlcyU1QjAlNUQuc2F2ZSglMjJmbHV4LXJlZHV4LnBuZyUyMik=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPriorReduxPipeline, FluxPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| device = <span class="hljs-string">"cuda"</span> | |
| dtype = torch.bfloat16 | |
| repo_redux = <span class="hljs-string">"black-forest-labs/FLUX.1-Redux-dev"</span> | |
| repo_base = <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span> | |
| pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(repo_redux, torch_dtype=dtype).to(device) | |
| pipe = FluxPipeline.from_pretrained( | |
| repo_base, | |
| text_encoder=<span class="hljs-literal">None</span>, | |
| text_encoder_2=<span class="hljs-literal">None</span>, | |
| torch_dtype=torch.bfloat16 | |
| ).to(device) | |
| image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"</span>) | |
| pipe_prior_output = pipe_prior_redux(image) | |
| images = pipe( | |
| guidance_scale=<span class="hljs-number">2.5</span>, | |
| num_inference_steps=<span class="hljs-number">50</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>), | |
| **pipe_prior_output, | |
| ).images | |
| images[<span class="hljs-number">0</span>].save(<span class="hljs-string">"flux-redux.png"</span>)`,wrap:!1}}),pt=new j({props:{title:"Combining Flux Turbo LoRAs with Flux Control, Fill, and Redux",local:"combining-flux-turbo-loras-with-flux-control-fill-and-redux",headingTag:"h2"}}),ct=new U({props:{code:"ZnJvbSUyMGRpZmZ1c2VycyUyMGltcG9ydCUyMEZsdXhDb250cm9sUGlwZWxpbmUlMEFmcm9tJTIwaW1hZ2VfZ2VuX2F1eCUyMGltcG9ydCUyMERlcHRoUHJlcHJvY2Vzc29yJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEFmcm9tJTIwaHVnZ2luZ2ZhY2VfaHViJTIwaW1wb3J0JTIwaGZfaHViX2Rvd25sb2FkJTBBaW1wb3J0JTIwdG9yY2glMEElMEFjb250cm9sX3BpcGUlMjAlM0QlMjBGbHV4Q29udHJvbFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQWNvbnRyb2xfcGlwZS5sb2FkX2xvcmFfd2VpZ2h0cyglMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1EZXB0aC1kZXYtbG9yYSUyMiUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMmRlcHRoJTIyKSUwQWNvbnRyb2xfcGlwZS5sb2FkX2xvcmFfd2VpZ2h0cyglMEElMjAlMjAlMjAlMjBoZl9odWJfZG93bmxvYWQoJTIyQnl0ZURhbmNlJTJGSHlwZXItU0QlMjIlMkMlMjAlMjJIeXBlci1GTFVYLjEtZGV2LThzdGVwcy1sb3JhLnNhZmV0ZW5zb3JzJTIyKSUyQyUyMGFkYXB0ZXJfbmFtZSUzRCUyMmh5cGVyLXNkJTIyJTBBKSUwQWNvbnRyb2xfcGlwZS5zZXRfYWRhcHRlcnMoJTVCJTIyZGVwdGglMjIlMkMlMjAlMjJoeXBlci1zZCUyMiU1RCUyQyUyMGFkYXB0ZXJfd2VpZ2h0cyUzRCU1QjAuODUlMkMlMjAwLjEyNSU1RCklMEFjb250cm9sX3BpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwcm9ib3QlMjBtYWRlJTIwb2YlMjBleG90aWMlMjBjYW5kaWVzJTIwYW5kJTIwY2hvY29sYXRlcyUyMG9mJTIwZGlmZmVyZW50JTIwa2luZHMuJTIwVGhlJTIwYmFja2dyb3VuZCUyMGlzJTIwZmlsbGVkJTIwd2l0aCUyMGNvbmZldHRpJTIwYW5kJTIwY2VsZWJyYXRvcnklMjBnaWZ0cy4lMjIlMEFjb250cm9sX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZyb2JvdC5wbmclMjIpJTBBJTBBcHJvY2Vzc29yJTIwJTNEJTIwRGVwdGhQcmVwcm9jZXNzb3IuZnJvbV9wcmV0cmFpbmVkKCUyMkxpaGVZb3VuZyUyRmRlcHRoLWFueXRoaW5nLWxhcmdlLWhmJTIyKSUwQWNvbnRyb2xfaW1hZ2UlMjAlM0QlMjBwcm9jZXNzb3IoY29udHJvbF9pbWFnZSklNUIwJTVELmNvbnZlcnQoJTIyUkdCJTIyKSUwQSUwQWltYWdlJTIwJTNEJTIwY29udHJvbF9waXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb250cm9sX2ltYWdlJTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMHdpZHRoJTNEMTAyNCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q4JTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QxMC4wJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEdG9yY2guR2VuZXJhdG9yKCkubWFudWFsX3NlZWQoNDIpJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxControlPipeline | |
| <span class="hljs-keyword">from</span> image_gen_aux <span class="hljs-keyword">import</span> DepthPreprocessor | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> hf_hub_download | |
| <span class="hljs-keyword">import</span> torch | |
| control_pipe = FluxControlPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, torch_dtype=torch.bfloat16) | |
| control_pipe.load_lora_weights(<span class="hljs-string">"black-forest-labs/FLUX.1-Depth-dev-lora"</span>, adapter_name=<span class="hljs-string">"depth"</span>) | |
| control_pipe.load_lora_weights( | |
| hf_hub_download(<span class="hljs-string">"ByteDance/Hyper-SD"</span>, <span class="hljs-string">"Hyper-FLUX.1-dev-8steps-lora.safetensors"</span>), adapter_name=<span class="hljs-string">"hyper-sd"</span> | |
| ) | |
| control_pipe.set_adapters([<span class="hljs-string">"depth"</span>, <span class="hljs-string">"hyper-sd"</span>], adapter_weights=[<span class="hljs-number">0.85</span>, <span class="hljs-number">0.125</span>]) | |
| control_pipe.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."</span> | |
| control_image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png"</span>) | |
| processor = DepthPreprocessor.from_pretrained(<span class="hljs-string">"LiheYoung/depth-anything-large-hf"</span>) | |
| control_image = processor(control_image)[<span class="hljs-number">0</span>].convert(<span class="hljs-string">"RGB"</span>) | |
| image = control_pipe( | |
| prompt=prompt, | |
| control_image=control_image, | |
| height=<span class="hljs-number">1024</span>, | |
| width=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">8</span>, | |
| guidance_scale=<span class="hljs-number">10.0</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">42</span>), | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),mt=new j({props:{title:"Note about unload_lora_weights() when using Flux LoRAs",local:"note-about-unloadloraweights-when-using-flux-loras",headingTag:"h2"}}),ut=new j({props:{title:"IP-Adapter",local:"ip-adapter",headingTag:"h2"}}),re=new Na({props:{$$slots:{default:[qi]},$$scope:{ctx:J}}}),ht=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy51dGlscyUyMGltcG9ydCUyMGxvYWRfaW1hZ2UlMEElMEFwaXBlJTIwJTNEJTIwRmx1eFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1kZXYlMjIlMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2JTBBKS50byglMjJjdWRhJTIyKSUwQSUwQWltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGZGF0YXNldHMlMkZodWdnaW5nZmFjZSUyRmRvY3VtZW50YXRpb24taW1hZ2VzJTJGcmVzb2x2ZSUyRm1haW4lMkZkaWZmdXNlcnMlMkZmbHV4X2lwX2FkYXB0ZXJfaW5wdXQuanBnJTIyKS5yZXNpemUoKDEwMjQlMkMlMjAxMDI0KSklMEElMEFwaXBlLmxvYWRfaXBfYWRhcHRlciglMEElMjAlMjAlMjAlMjAlMjJYTGFicy1BSSUyRmZsdXgtaXAtYWRhcHRlciUyMiUyQyUwQSUyMCUyMCUyMCUyMHdlaWdodF9uYW1lJTNEJTIyaXBfYWRhcHRlci5zYWZldGVuc29ycyUyMiUyQyUwQSUyMCUyMCUyMCUyMGltYWdlX2VuY29kZXJfcHJldHJhaW5lZF9tb2RlbF9uYW1lX29yX3BhdGglM0QlMjJvcGVuYWklMkZjbGlwLXZpdC1sYXJnZS1wYXRjaDE0JTIyJTBBKSUwQXBpcGUuc2V0X2lwX2FkYXB0ZXJfc2NhbGUoMS4wKSUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjB3aWR0aCUzRDEwMjQlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTNEJTIyd2VhcmluZyUyMHN1bmdsYXNzZXMlMjIlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0QlMjIlMjIlMkMlMEElMjAlMjAlMjAlMjB0cnVlX2NmZ19zY2FsZSUzRDQuMCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDQ0NDQpJTJDJTBBJTIwJTIwJTIwJTIwaXBfYWRhcHRlcl9pbWFnZSUzRGltYWdlJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBJTBBaW1hZ2Uuc2F2ZSgnZmx1eF9pcF9hZGFwdGVyX291dHB1dC5qcGcnKQ==",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| <span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| pipe = FluxPipeline.from_pretrained( | |
| <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, torch_dtype=torch.bfloat16 | |
| ).to(<span class="hljs-string">"cuda"</span>) | |
| image = load_image(<span class="hljs-string">"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_input.jpg"</span>).resize((<span class="hljs-number">1024</span>, <span class="hljs-number">1024</span>)) | |
| pipe.load_ip_adapter( | |
| <span class="hljs-string">"XLabs-AI/flux-ip-adapter"</span>, | |
| weight_name=<span class="hljs-string">"ip_adapter.safetensors"</span>, | |
| image_encoder_pretrained_model_name_or_path=<span class="hljs-string">"openai/clip-vit-large-patch14"</span> | |
| ) | |
| pipe.set_ip_adapter_scale(<span class="hljs-number">1.0</span>) | |
| image = pipe( | |
| width=<span class="hljs-number">1024</span>, | |
| height=<span class="hljs-number">1024</span>, | |
| prompt=<span class="hljs-string">"wearing sunglasses"</span>, | |
| negative_prompt=<span class="hljs-string">""</span>, | |
| true_cfg_scale=<span class="hljs-number">4.0</span>, | |
| generator=torch.Generator().manual_seed(<span class="hljs-number">4444</span>), | |
| ip_adapter_image=image, | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">'flux_ip_adapter_output.jpg'</span>)`,wrap:!1}}),_t=new j({props:{title:"Optimize",local:"optimize",headingTag:"h2"}}),yt=new j({props:{title:"Group offloading",local:"group-offloading",headingTag:"h3"}}),pe=new Na({props:{warning:!1,$$slots:{default:[Di]},$$scope:{ctx:J}}}),wt=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBZnJvbSUyMGRpZmZ1c2Vycy5ob29rcyUyMGltcG9ydCUyMGFwcGx5X2dyb3VwX29mZmxvYWRpbmclMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLWRldiUyMiUwQWR0eXBlJTIwJTNEJTIwdG9yY2guYmZsb2F0MTYlMEFwaXBlJTIwJTNEJTIwRmx1eFBpcGVsaW5lLmZyb21fcHJldHJhaW5lZCglMEElMDltb2RlbF9pZCUyQyUwQSUwOXRvcmNoX2R0eXBlJTNEZHR5cGUlMkMlMEEpJTBBJTBBYXBwbHlfZ3JvdXBfb2ZmbG9hZGluZyglMEElMjAlMjAlMjAlMjBwaXBlLnRyYW5zZm9ybWVyJTJDJTBBJTIwJTIwJTIwJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiUyQyUwQSUyMCUyMCUyMCUyMG9mZmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmNwdSUyMiklMkMlMEElMjAlMjAlMjAlMjBvbmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmN1ZGElMjIpJTJDJTBBJTIwJTIwJTIwJTIwdXNlX3N0cmVhbSUzRFRydWUlMkMlMEEpJTBBYXBwbHlfZ3JvdXBfb2ZmbG9hZGluZyglMEElMjAlMjAlMjAlMjBwaXBlLnRleHRfZW5jb2RlciUyQyUyMCUwQSUyMCUyMCUyMCUyMG9mZmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmNwdSUyMiklMkMlMEElMjAlMjAlMjAlMjBvbmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmN1ZGElMjIpJTJDJTBBJTIwJTIwJTIwJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zdHJlYW0lM0RUcnVlJTJDJTBBKSUwQWFwcGx5X2dyb3VwX29mZmxvYWRpbmcoJTBBJTIwJTIwJTIwJTIwcGlwZS50ZXh0X2VuY29kZXJfMiUyQyUyMCUwQSUyMCUyMCUyMCUyMG9mZmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmNwdSUyMiklMkMlMEElMjAlMjAlMjAlMjBvbmxvYWRfZGV2aWNlJTNEdG9yY2guZGV2aWNlKCUyMmN1ZGElMjIpJTJDJTBBJTIwJTIwJTIwJTIwb2ZmbG9hZF90eXBlJTNEJTIybGVhZl9sZXZlbCUyMiUyQyUwQSUyMCUyMCUyMCUyMHVzZV9zdHJlYW0lM0RUcnVlJTJDJTBBKSUwQWFwcGx5X2dyb3VwX29mZmxvYWRpbmcoJTBBJTIwJTIwJTIwJTIwcGlwZS52YWUlMkMlMjAlMEElMjAlMjAlMjAlMjBvZmZsb2FkX2RldmljZSUzRHRvcmNoLmRldmljZSglMjJjcHUlMjIpJTJDJTBBJTIwJTIwJTIwJTIwb25sb2FkX2RldmljZSUzRHRvcmNoLmRldmljZSglMjJjdWRhJTIyKSUyQyUwQSUyMCUyMCUyMCUyMG9mZmxvYWRfdHlwZSUzRCUyMmxlYWZfbGV2ZWwlMjIlMkMlMEElMjAlMjAlMjAlMjB1c2Vfc3RyZWFtJTNEVHJ1ZSUyQyUwQSklMEElMEFwcm9tcHQlM0QlMjJBJTIwY2F0JTIwd2VhcmluZyUyMHN1bmdsYXNzZXMlMjBhbmQlMjB3b3JraW5nJTIwYXMlMjBhJTIwbGlmZWd1YXJkJTIwYXQlMjBwb29sLiUyMiUwQSUwQWdlbmVyYXRvciUyMCUzRCUyMHRvcmNoLkdlbmVyYXRvcigpLm1hbnVhbF9zZWVkKDE4MTIwMSklMEFpbWFnZSUyMCUzRCUyMHBpcGUoJTBBJTIwJTIwJTIwJTIwcHJvbXB0JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0Q1NzYlMkMlMEElMjAlMjAlMjAlMjBoZWlnaHQlM0QxMDI0JTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDMwJTJDJTBBJTIwJTIwJTIwJTIwZ2VuZXJhdG9yJTNEZ2VuZXJhdG9yJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2U=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| <span class="hljs-keyword">from</span> diffusers.hooks <span class="hljs-keyword">import</span> apply_group_offloading | |
| model_id = <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span> | |
| dtype = torch.bfloat16 | |
| pipe = FluxPipeline.from_pretrained( | |
| model_id, | |
| torch_dtype=dtype, | |
| ) | |
| apply_group_offloading( | |
| pipe.transformer, | |
| offload_type=<span class="hljs-string">"leaf_level"</span>, | |
| offload_device=torch.device(<span class="hljs-string">"cpu"</span>), | |
| onload_device=torch.device(<span class="hljs-string">"cuda"</span>), | |
| use_stream=<span class="hljs-literal">True</span>, | |
| ) | |
| apply_group_offloading( | |
| pipe.text_encoder, | |
| offload_device=torch.device(<span class="hljs-string">"cpu"</span>), | |
| onload_device=torch.device(<span class="hljs-string">"cuda"</span>), | |
| offload_type=<span class="hljs-string">"leaf_level"</span>, | |
| use_stream=<span class="hljs-literal">True</span>, | |
| ) | |
| apply_group_offloading( | |
| pipe.text_encoder_2, | |
| offload_device=torch.device(<span class="hljs-string">"cpu"</span>), | |
| onload_device=torch.device(<span class="hljs-string">"cuda"</span>), | |
| offload_type=<span class="hljs-string">"leaf_level"</span>, | |
| use_stream=<span class="hljs-literal">True</span>, | |
| ) | |
| apply_group_offloading( | |
| pipe.vae, | |
| offload_device=torch.device(<span class="hljs-string">"cpu"</span>), | |
| onload_device=torch.device(<span class="hljs-string">"cuda"</span>), | |
| offload_type=<span class="hljs-string">"leaf_level"</span>, | |
| use_stream=<span class="hljs-literal">True</span>, | |
| ) | |
| prompt=<span class="hljs-string">"A cat wearing sunglasses and working as a lifeguard at pool."</span> | |
| generator = torch.Generator().manual_seed(<span class="hljs-number">181201</span>) | |
| image = pipe( | |
| prompt, | |
| width=<span class="hljs-number">576</span>, | |
| height=<span class="hljs-number">1024</span>, | |
| num_inference_steps=<span class="hljs-number">30</span>, | |
| generator=generator | |
| ).images[<span class="hljs-number">0</span>] | |
| image`,wrap:!1}}),Tt=new j({props:{title:"Running FP16 inference",local:"running-fp16-inference",headingTag:"h3"}}),Jt=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFBpcGVsaW5lJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtc2NobmVsbCUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYpJTIwJTIzJTIwY2FuJTIwcmVwbGFjZSUyMHNjaG5lbGwlMjB3aXRoJTIwZGV2JTBBJTIzJTIwdG8lMjBydW4lMjBvbiUyMGxvdyUyMHZyYW0lMjBHUFVzJTIwKGkuZS4lMjBiZXR3ZWVuJTIwNCUyMGFuZCUyMDMyJTIwR0IlMjBWUkFNKSUwQXBpcGUuZW5hYmxlX3NlcXVlbnRpYWxfY3B1X29mZmxvYWQoKSUwQXBpcGUudmFlLmVuYWJsZV9zbGljaW5nKCklMEFwaXBlLnZhZS5lbmFibGVfdGlsaW5nKCklMEElMEFwaXBlLnRvKHRvcmNoLmZsb2F0MTYpJTIwJTIzJTIwY2FzdGluZyUyMGhlcmUlMjBpbnN0ZWFkJTIwb2YlMjBpbiUyMHRoZSUyMHBpcGVsaW5lJTIwY29uc3RydWN0b3IlMjBiZWNhdXNlJTIwZG9pbmclMjBzbyUyMGluJTIwdGhlJTIwY29uc3RydWN0b3IlMjBsb2FkcyUyMGFsbCUyMG1vZGVscyUyMGludG8lMjBDUFUlMjBtZW1vcnklMjBhdCUyMG9uY2UlMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwY2F0JTIwaG9sZGluZyUyMGElMjBzaWduJTIwdGhhdCUyMHNheXMlMjBoZWxsbyUyMHdvcmxkJTIyJTBBb3V0JTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDAuJTJDJTBBJTIwJTIwJTIwJTIwaGVpZ2h0JTNENzY4JTJDJTBBJTIwJTIwJTIwJTIwd2lkdGglM0QxMzYwJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDQlMkMlMEElMjAlMjAlMjAlMjBtYXhfc2VxdWVuY2VfbGVuZ3RoJTNEMjU2JTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBb3V0LnNhdmUoJTIyaW1hZ2UucG5nJTIyKQ==",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxPipeline | |
| pipe = FluxPipeline.from_pretrained(<span class="hljs-string">"black-forest-labs/FLUX.1-schnell"</span>, torch_dtype=torch.bfloat16) <span class="hljs-comment"># can replace schnell with dev</span> | |
| <span class="hljs-comment"># to run on low vram GPUs (i.e. between 4 and 32 GB VRAM)</span> | |
| pipe.enable_sequential_cpu_offload() | |
| pipe.vae.enable_slicing() | |
| pipe.vae.enable_tiling() | |
| pipe.to(torch.float16) <span class="hljs-comment"># casting here instead of in the pipeline constructor because doing so in the constructor loads all models into CPU memory at once</span> | |
| prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| out = pipe( | |
| prompt=prompt, | |
| guidance_scale=<span class="hljs-number">0.</span>, | |
| height=<span class="hljs-number">768</span>, | |
| width=<span class="hljs-number">1360</span>, | |
| num_inference_steps=<span class="hljs-number">4</span>, | |
| max_sequence_length=<span class="hljs-number">256</span>, | |
| ).images[<span class="hljs-number">0</span>] | |
| out.save(<span class="hljs-string">"image.png"</span>)`,wrap:!1}}),jt=new j({props:{title:"Quantization",local:"quantization",headingTag:"h3"}}),Zt=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQml0c0FuZEJ5dGVzQ29uZmlnJTIwYXMlMjBEaWZmdXNlcnNCaXRzQW5kQnl0ZXNDb25maWclMkMlMjBGbHV4VHJhbnNmb3JtZXIyRE1vZGVsJTJDJTIwRmx1eFBpcGVsaW5lJTBBZnJvbSUyMHRyYW5zZm9ybWVycyUyMGltcG9ydCUyMEJpdHNBbmRCeXRlc0NvbmZpZyUyMGFzJTIwQml0c0FuZEJ5dGVzQ29uZmlnJTJDJTIwVDVFbmNvZGVyTW9kZWwlMEElMEFxdWFudF9jb25maWclMjAlM0QlMjBCaXRzQW5kQnl0ZXNDb25maWcobG9hZF9pbl84Yml0JTNEVHJ1ZSklMEF0ZXh0X2VuY29kZXJfOGJpdCUyMCUzRCUyMFQ1RW5jb2Rlck1vZGVsLmZyb21fcHJldHJhaW5lZCglMEElMjAlMjAlMjAlMjAlMjJibGFjay1mb3Jlc3QtbGFicyUyRkZMVVguMS1kZXYlMjIlMkMlMEElMjAlMjAlMjAlMjBzdWJmb2xkZXIlM0QlMjJ0ZXh0X2VuY29kZXJfMiUyMiUyQyUwQSUyMCUyMCUyMCUyMHF1YW50aXphdGlvbl9jb25maWclM0RxdWFudF9jb25maWclMkMlMEElMjAlMjAlMjAlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmZsb2F0MTYlMkMlMEEpJTBBJTBBcXVhbnRfY29uZmlnJTIwJTNEJTIwRGlmZnVzZXJzQml0c0FuZEJ5dGVzQ29uZmlnKGxvYWRfaW5fOGJpdCUzRFRydWUpJTBBdHJhbnNmb3JtZXJfOGJpdCUyMCUzRCUyMEZsdXhUcmFuc2Zvcm1lcjJETW9kZWwuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLWRldiUyMiUyQyUwQSUyMCUyMCUyMCUyMHN1YmZvbGRlciUzRCUyMnRyYW5zZm9ybWVyJTIyJTJDJTBBJTIwJTIwJTIwJTIwcXVhbnRpemF0aW9uX2NvbmZpZyUzRHF1YW50X2NvbmZpZyUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSklMEElMEFwaXBlbGluZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwJTIyYmxhY2stZm9yZXN0LWxhYnMlMkZGTFVYLjEtZGV2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdGV4dF9lbmNvZGVyXzIlM0R0ZXh0X2VuY29kZXJfOGJpdCUyQyUwQSUyMCUyMCUyMCUyMHRyYW5zZm9ybWVyJTNEdHJhbnNmb3JtZXJfOGJpdCUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guZmxvYXQxNiUyQyUwQSUyMCUyMCUyMCUyMGRldmljZV9tYXAlM0QlMjJiYWxhbmNlZCUyMiUyQyUwQSklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJhJTIwdGlueSUyMGFzdHJvbmF1dCUyMGhhdGNoaW5nJTIwZnJvbSUyMGFuJTIwZWdnJTIwb24lMjB0aGUlMjBtb29uJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlbGluZShwcm9tcHQlMkMlMjBndWlkYW5jZV9zY2FsZSUzRDMuNSUyQyUyMGhlaWdodCUzRDc2OCUyQyUyMHdpZHRoJTNEMTM2MCUyQyUyMG51bV9pbmZlcmVuY2Vfc3RlcHMlM0Q1MCkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyZmx1eC5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> BitsAndBytesConfig <span class="hljs-keyword">as</span> DiffusersBitsAndBytesConfig, FluxTransformer2DModel, FluxPipeline | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BitsAndBytesConfig <span class="hljs-keyword">as</span> BitsAndBytesConfig, T5EncoderModel | |
| quant_config = BitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>) | |
| text_encoder_8bit = T5EncoderModel.from_pretrained( | |
| <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, | |
| subfolder=<span class="hljs-string">"text_encoder_2"</span>, | |
| quantization_config=quant_config, | |
| torch_dtype=torch.float16, | |
| ) | |
| quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>) | |
| transformer_8bit = FluxTransformer2DModel.from_pretrained( | |
| <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, | |
| subfolder=<span class="hljs-string">"transformer"</span>, | |
| quantization_config=quant_config, | |
| torch_dtype=torch.float16, | |
| ) | |
| pipeline = FluxPipeline.from_pretrained( | |
| <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span>, | |
| text_encoder_2=text_encoder_8bit, | |
| transformer=transformer_8bit, | |
| torch_dtype=torch.float16, | |
| device_map=<span class="hljs-string">"balanced"</span>, | |
| ) | |
| prompt = <span class="hljs-string">"a tiny astronaut hatching from an egg on the moon"</span> | |
| image = pipeline(prompt, guidance_scale=<span class="hljs-number">3.5</span>, height=<span class="hljs-number">768</span>, width=<span class="hljs-number">1360</span>, num_inference_steps=<span class="hljs-number">50</span>).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"flux.png"</span>)`,wrap:!1}}),kt=new j({props:{title:"Single File Loading for the FluxTransformer2DModel",local:"single-file-loading-for-the-fluxtransformer2dmodel",headingTag:"h2"}}),de=new Na({props:{$$slots:{default:[Oi]},$$scope:{ctx:J}}}),Bt=new U({props:{code:"cGlwJTIwaW5zdGFsbCUyMG9wdGltdW0tcXVhbnRv",highlighted:"pip install optimum-quanto",wrap:!1}}),Pt=new U({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwRmx1eFRyYW5zZm9ybWVyMkRNb2RlbCUyQyUyMEZsdXhQaXBlbGluZSUwQWZyb20lMjB0cmFuc2Zvcm1lcnMlMjBpbXBvcnQlMjBUNUVuY29kZXJNb2RlbCUyQyUyMENMSVBUZXh0TW9kZWwlMEFmcm9tJTIwb3B0aW11bS5xdWFudG8lMjBpbXBvcnQlMjBmcmVlemUlMkMlMjBxZmxvYXQ4JTJDJTIwcXVhbnRpemUlMEElMEFiZmxfcmVwbyUyMCUzRCUyMCUyMmJsYWNrLWZvcmVzdC1sYWJzJTJGRkxVWC4xLWRldiUyMiUwQWR0eXBlJTIwJTNEJTIwdG9yY2guYmZsb2F0MTYlMEElMEF0cmFuc2Zvcm1lciUyMCUzRCUyMEZsdXhUcmFuc2Zvcm1lcjJETW9kZWwuZnJvbV9zaW5nbGVfZmlsZSglMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGS2lqYWklMkZmbHV4LWZwOCUyRmJsb2IlMkZtYWluJTJGZmx1eDEtZGV2LWZwOC5zYWZldGVuc29ycyUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEZHR5cGUpJTBBcXVhbnRpemUodHJhbnNmb3JtZXIlMkMlMjB3ZWlnaHRzJTNEcWZsb2F0OCklMEFmcmVlemUodHJhbnNmb3JtZXIpJTBBJTBBdGV4dF9lbmNvZGVyXzIlMjAlM0QlMjBUNUVuY29kZXJNb2RlbC5mcm9tX3ByZXRyYWluZWQoYmZsX3JlcG8lMkMlMjBzdWJmb2xkZXIlM0QlMjJ0ZXh0X2VuY29kZXJfMiUyMiUyQyUyMHRvcmNoX2R0eXBlJTNEZHR5cGUpJTBBcXVhbnRpemUodGV4dF9lbmNvZGVyXzIlMkMlMjB3ZWlnaHRzJTNEcWZsb2F0OCklMEFmcmVlemUodGV4dF9lbmNvZGVyXzIpJTBBJTBBcGlwZSUyMCUzRCUyMEZsdXhQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoYmZsX3JlcG8lMkMlMjB0cmFuc2Zvcm1lciUzRE5vbmUlMkMlMjB0ZXh0X2VuY29kZXJfMiUzRE5vbmUlMkMlMjB0b3JjaF9kdHlwZSUzRGR0eXBlKSUwQXBpcGUudHJhbnNmb3JtZXIlMjAlM0QlMjB0cmFuc2Zvcm1lciUwQXBpcGUudGV4dF9lbmNvZGVyXzIlMjAlM0QlMjB0ZXh0X2VuY29kZXJfMiUwQSUwQXBpcGUuZW5hYmxlX21vZGVsX2NwdV9vZmZsb2FkKCklMEElMEFwcm9tcHQlMjAlM0QlMjAlMjJBJTIwY2F0JTIwaG9sZGluZyUyMGElMjBzaWduJTIwdGhhdCUyMHNheXMlMjBoZWxsbyUyMHdvcmxkJTIyJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGd1aWRhbmNlX3NjYWxlJTNEMy41JTJDJTBBJTIwJTIwJTIwJTIwb3V0cHV0X3R5cGUlM0QlMjJwaWwlMjIlMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNEMjAlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCgwKSUwQSkuaW1hZ2VzJTVCMCU1RCUwQSUwQWltYWdlLnNhdmUoJTIyZmx1eC1mcDgtZGV2LnBuZyUyMik=",highlighted:`<span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> FluxTransformer2DModel, FluxPipeline | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> T5EncoderModel, CLIPTextModel | |
| <span class="hljs-keyword">from</span> optimum.quanto <span class="hljs-keyword">import</span> freeze, qfloat8, quantize | |
| bfl_repo = <span class="hljs-string">"black-forest-labs/FLUX.1-dev"</span> | |
| dtype = torch.bfloat16 | |
| transformer = FluxTransformer2DModel.from_single_file(<span class="hljs-string">"https://huggingface.co/Kijai/flux-fp8/blob/main/flux1-dev-fp8.safetensors"</span>, torch_dtype=dtype) | |
| quantize(transformer, weights=qfloat8) | |
| freeze(transformer) | |
| text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder=<span class="hljs-string">"text_encoder_2"</span>, torch_dtype=dtype) | |
| quantize(text_encoder_2, weights=qfloat8) | |
| freeze(text_encoder_2) | |
| pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=<span class="hljs-literal">None</span>, text_encoder_2=<span class="hljs-literal">None</span>, torch_dtype=dtype) | |
| pipe.transformer = transformer | |
| pipe.text_encoder_2 = text_encoder_2 | |
| pipe.enable_model_cpu_offload() | |
| prompt = <span class="hljs-string">"A cat holding a sign that says hello world"</span> | |
| image = pipe( | |
| prompt, | |
| guidance_scale=<span class="hljs-number">3.5</span>, | |
| output_type=<span class="hljs-string">"pil"</span>, | |
| num_inference_steps=<span class="hljs-number">20</span>, | |
| generator=torch.Generator(<span class="hljs-string">"cpu"</span>).manual_seed(<span class="hljs-number">0</span>) | |
| ).images[<span class="hljs-number">0</span>] | |
| image.save(<span class="hljs-string">"flux-fp8-dev.png"</span>)`,wrap:!1}}),$t=new j({props:{title:"FluxPipeline",local:"diffusers.FluxPipeline",headingTag:"h2"}}),Lt=new v({props:{name:"class diffusers.FluxPipeline",anchor:"diffusers.FluxPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"}],parametersDescription:[{anchor:"diffusers.FluxPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L146"}}),Rt=new v({props:{name:"__call__",anchor:"diffusers.FluxPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"true_cfg_scale",val:": float = 1.0"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 3.5"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"negative_ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"negative_pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead.`,name:"prompt_2"},{anchor:"diffusers.FluxPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>true_cfg_scale</code> is | |
| not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.FluxPipeline.__call__.negative_prompt_2",description:`<strong>negative_prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation to be sent to <code>tokenizer_2</code> and | |
| <code>text_encoder_2</code>. If not defined, <code>negative_prompt</code> is used in all the text-encoders.`,name:"negative_prompt_2"},{anchor:"diffusers.FluxPipeline.__call__.true_cfg_scale",description:`<strong>true_cfg_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| When > 1.0 and a provided <code>negative_prompt</code>, enables true classifier-free guidance.`,name:"true_cfg_scale"},{anchor:"diffusers.FluxPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 3.5) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> — (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.FluxPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.FluxPipeline.__call__.negative_ip_adapter_image",description:`<strong>negative_ip_adapter_image</strong> — | |
| (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_ip_adapter_image"},{anchor:"diffusers.FluxPipeline.__call__.negative_ip_adapter_image_embeds",description:`<strong>negative_ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"negative_ip_adapter_image_embeds"},{anchor:"diffusers.FluxPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input | |
| argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.FluxPipeline.__call__.negative_pooled_prompt_embeds",description:`<strong>negative_pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt | |
| weighting. If not provided, pooled negative_prompt_embeds will be generated from <code>negative_prompt</code> | |
| input argument.`,name:"negative_pooled_prompt_embeds"},{anchor:"diffusers.FluxPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L627",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),ce=new ae({props:{anchor:"diffusers.FluxPipeline.__call__.example",$$slots:{default:[Ki]},$$scope:{ctx:J}}}),Xt=new v({props:{name:"disable_vae_slicing",anchor:"diffusers.FluxPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L550"}}),Et=new v({props:{name:"disable_vae_tiling",anchor:"diffusers.FluxPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L565"}}),Vt=new v({props:{name:"enable_vae_slicing",anchor:"diffusers.FluxPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L543"}}),Yt=new v({props:{name:"enable_vae_tiling",anchor:"diffusers.FluxPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L557"}}),zt=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux.py#L310"}}),Qt=new j({props:{title:"FluxImg2ImgPipeline",local:"diffusers.FluxImg2ImgPipeline",headingTag:"h2"}}),At=new v({props:{name:"class diffusers.FluxImg2ImgPipeline",anchor:"diffusers.FluxImg2ImgPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"}],parametersDescription:[{anchor:"diffusers.FluxImg2ImgPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxImg2ImgPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxImg2ImgPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxImg2ImgPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L169"}}),Ht=new v({props:{name:"__call__",anchor:"diffusers.FluxImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"true_cfg_scale",val:": float = 1.0"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"negative_ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"negative_pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead`,name:"prompt_2"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> — (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.negative_ip_adapter_image",description:`<strong>negative_ip_adapter_image</strong> — | |
| (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_ip_adapter_image"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.negative_ip_adapter_image_embeds",description:`<strong>negative_ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"negative_ip_adapter_image_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxImg2ImgPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L709",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),he=new ae({props:{anchor:"diffusers.FluxImg2ImgPipeline.__call__.example",$$slots:{default:[ep]},$$scope:{ctx:J}}}),St=new v({props:{name:"disable_vae_slicing",anchor:"diffusers.FluxImg2ImgPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L619"}}),qt=new v({props:{name:"disable_vae_tiling",anchor:"diffusers.FluxImg2ImgPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L636"}}),Dt=new v({props:{name:"enable_vae_slicing",anchor:"diffusers.FluxImg2ImgPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L611"}}),Ot=new v({props:{name:"enable_vae_tiling",anchor:"diffusers.FluxImg2ImgPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L627"}}),Kt=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxImg2ImgPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L333"}}),en=new j({props:{title:"FluxInpaintPipeline",local:"diffusers.FluxInpaintPipeline",headingTag:"h2"}}),tn=new v({props:{name:"class diffusers.FluxInpaintPipeline",anchor:"diffusers.FluxInpaintPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"}],parametersDescription:[{anchor:"diffusers.FluxInpaintPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxInpaintPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxInpaintPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxInpaintPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py#L166"}}),nn=new v({props:{name:"__call__",anchor:"diffusers.FluxInpaintPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"true_cfg_scale",val:": float = 1.0"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"masked_image_latents",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"padding_mask_crop",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"negative_ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"negative_pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxInpaintPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead`,name:"prompt_2"},{anchor:"diffusers.FluxInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.FluxInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to mask <code>image</code>. White pixels in the mask | |
| are repainted while black pixels are preserved. If <code>mask_image</code> is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it’s a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be <code>(B, 1, H, W)</code>, <code>(B, H, W)</code>, <code>(1, H, W)</code>, <code>(H, W)</code>. And for numpy array would be for <code>(B, H, W, 1)</code>, <code>(B, H, W)</code>, <code>(H, W, 1)</code>, or <code>(H, W)</code>.`,name:"mask_image"},{anchor:"diffusers.FluxInpaintPipeline.__call__.mask_image_latent",description:`<strong>mask_image_latent</strong> (<code>torch.Tensor</code>, <code>List[torch.Tensor]</code>) — | |
| <code>Tensor</code> representing an image batch to mask <code>image</code> generated by VAE. If not provided, the mask | |
| latents tensor will ge generated by <code>mask_image</code>.`,name:"mask_image_latent"},{anchor:"diffusers.FluxInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>, defaults to <code>None</code>) — | |
| The size of margin in the crop to be applied to the image and masking. If <code>None</code>, no crop is applied to | |
| image and mask_image. If <code>padding_mask_crop</code> is not <code>None</code>, it will first find a rectangular region | |
| with the same aspect ration of the image and contains all masked area, and then expand that area based | |
| on <code>padding_mask_crop</code>. The image and mask_image will then be cropped based on the expanded area before | |
| resizing to the original image size for inpainting. This is useful when the masked area is small while | |
| the image is large and contain information irrelevant for inpainting, such as background.`,name:"padding_mask_crop"},{anchor:"diffusers.FluxInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.FluxInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxInpaintPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxInpaintPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> — (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.FluxInpaintPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.FluxInpaintPipeline.__call__.negative_ip_adapter_image",description:`<strong>negative_ip_adapter_image</strong> — | |
| (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_ip_adapter_image"},{anchor:"diffusers.FluxInpaintPipeline.__call__.negative_ip_adapter_image_embeds",description:`<strong>negative_ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) — | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not | |
| provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"negative_ip_adapter_image_embeds"},{anchor:"diffusers.FluxInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxInpaintPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxInpaintPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py#L775",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),xe=new ae({props:{anchor:"diffusers.FluxInpaintPipeline.__call__.example",$$slots:{default:[tp]},$$scope:{ctx:J}}}),on=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxInpaintPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py#L337"}}),sn=new j({props:{title:"FluxControlNetInpaintPipeline",local:"diffusers.FluxControlNetInpaintPipeline",headingTag:"h2"}}),an=new v({props:{name:"class diffusers.FluxControlNetInpaintPipeline",anchor:"diffusers.FluxControlNetInpaintPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"controlnet",val:": typing.Union[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel, typing.List[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel], typing.Tuple[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel], diffusers.models.controlnets.controlnet_flux.FluxMultiControlNetModel]"}],parametersDescription:[{anchor:"diffusers.FluxControlNetInpaintPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxControlNetInpaintPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxControlNetInpaintPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxControlNetInpaintPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxControlNetInpaintPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxControlNetInpaintPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxControlNetInpaintPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py#L174"}}),ln=new v({props:{name:"__call__",anchor:"diffusers.FluxControlNetInpaintPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"mask_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"masked_image_latents",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"padding_mask_crop",val:": typing.Optional[int] = None"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"num_inference_steps",val:": int = 28"},{name:"guidance_scale",val:": float = 7.0"},{name:"control_guidance_start",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"control_guidance_end",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"control_mode",val:": typing.Union[int, typing.List[int], NoneType] = None"},{name:"controlnet_conditioning_scale",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation.`,name:"prompt"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>.`,name:"prompt_2"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>List[PIL.Image.Image]</code> or <code>torch.FloatTensor</code>) — | |
| The image(s) to inpaint.`,name:"image"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>PIL.Image.Image</code> or <code>List[PIL.Image.Image]</code> or <code>torch.FloatTensor</code>) — | |
| The mask image(s) to use for inpainting. White pixels in the mask will be repainted, while black pixels | |
| will be preserved.`,name:"mask_image"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.masked_image_latents",description:`<strong>masked_image_latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated masked image latents.`,name:"masked_image_latents"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.control_image",description:`<strong>control_image</strong> (<code>PIL.Image.Image</code> or <code>List[PIL.Image.Image]</code> or <code>torch.FloatTensor</code>) — | |
| The ControlNet input condition. Image to control the generation.`,name:"control_image"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.default_sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.default_sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.6) — | |
| Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1.`,name:"strength"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.padding_mask_crop",description:`<strong>padding_mask_crop</strong> (<code>int</code>, <em>optional</em>) — | |
| The size of the padding to use when cropping the mask.`,name:"padding_mask_crop"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 28) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>.`,name:"guidance_scale"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.control_guidance_start",description:`<strong>control_guidance_start</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 0.0) — | |
| The percentage of total steps at which the ControlNet starts applying.`,name:"control_guidance_start"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.control_guidance_end",description:`<strong>control_guidance_end</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 1.0) — | |
| The percentage of total steps at which the ControlNet stops applying.`,name:"control_guidance_end"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.control_mode",description:`<strong>control_mode</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>) — | |
| The mode for the ControlNet. If multiple ControlNets are used, this should be a list.`,name:"control_mode"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.controlnet_conditioning_scale",description:`<strong>controlnet_conditioning_scale</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 1.0) — | |
| The outputs of the ControlNet are multiplied by <code>controlnet_conditioning_scale</code> before they are added | |
| to the residual in the original transformer.`,name:"controlnet_conditioning_scale"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or more <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> to | |
| make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts.`,name:"latents"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between <code>PIL.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| Additional keyword arguments to be passed to the joint attention mechanism.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising step during the inference.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) — | |
| The maximum length of the sequence to be generated.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py#L738",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),we=new ae({props:{anchor:"diffusers.FluxControlNetInpaintPipeline.__call__.example",$$slots:{default:[np]},$$scope:{ctx:J}}}),rn=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlNetInpaintPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py#L346"}}),pn=new j({props:{title:"FluxControlNetImg2ImgPipeline",local:"diffusers.FluxControlNetImg2ImgPipeline",headingTag:"h2"}}),dn=new v({props:{name:"class diffusers.FluxControlNetImg2ImgPipeline",anchor:"diffusers.FluxControlNetImg2ImgPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"},{name:"controlnet",val:": typing.Union[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel, typing.List[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel], typing.Tuple[diffusers.models.controlnets.controlnet_flux.FluxControlNetModel], diffusers.models.controlnets.controlnet_flux.FluxMultiControlNetModel]"}],parametersDescription:[{anchor:"diffusers.FluxControlNetImg2ImgPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py#L172"}}),cn=new v({props:{name:"__call__",anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"control_guidance_start",val:": typing.Union[float, typing.List[float]] = 0.0"},{name:"control_guidance_end",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"control_mode",val:": typing.Union[int, typing.List[int], NoneType] = None"},{name:"controlnet_conditioning_scale",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation.`,name:"prompt"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>.`,name:"prompt_2"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>PIL.Image.Image</code> or <code>List[PIL.Image.Image]</code> or <code>torch.FloatTensor</code>) — | |
| The image(s) to modify with the pipeline.`,name:"image"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.control_image",description:`<strong>control_image</strong> (<code>PIL.Image.Image</code> or <code>List[PIL.Image.Image]</code> or <code>torch.FloatTensor</code>) — | |
| The ControlNet input condition. Image to control the generation.`,name:"control_image"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.default_sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.default_sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 0.6) — | |
| Conceptually, indicates how much to transform the reference <code>image</code>. Must be between 0 and 1.`,name:"strength"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 28) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>.`,name:"guidance_scale"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.control_mode",description:`<strong>control_mode</strong> (<code>int</code> or <code>List[int]</code>, <em>optional</em>) — | |
| The mode for the ControlNet. If multiple ControlNets are used, this should be a list.`,name:"control_mode"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.controlnet_conditioning_scale",description:`<strong>controlnet_conditioning_scale</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 1.0) — | |
| The outputs of the ControlNet are multiplied by <code>controlnet_conditioning_scale</code> before they are added | |
| to the residual in the original transformer.`,name:"controlnet_conditioning_scale"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or more <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> to | |
| make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts.`,name:"latents"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between <code>PIL.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| Additional keyword arguments to be passed to the joint attention mechanism.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising step during the inference.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code>, <em>optional</em>, defaults to 512) — | |
| The maximum length of the sequence to be generated.`,name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py#L634",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Te=new ae({props:{anchor:"diffusers.FluxControlNetImg2ImgPipeline.__call__.example",$$slots:{default:[op]},$$scope:{ctx:J}}}),mn=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlNetImg2ImgPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_controlnet_image_to_image.py#L335"}}),gn=new j({props:{title:"FluxControlPipeline",local:"diffusers.FluxControlPipeline",headingTag:"h2"}}),un=new v({props:{name:"class diffusers.FluxControlPipeline",anchor:"diffusers.FluxControlPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.FluxControlPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxControlPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxControlPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxControlPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxControlPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxControlPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxControlPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L159"}}),fn=new v({props:{name:"__call__",anchor:"diffusers.FluxControlPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 3.5"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxControlPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxControlPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead`,name:"prompt_2"},{anchor:"diffusers.FluxControlPipeline.__call__.control_image",description:`<strong>control_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, — | |
| <code>List[List[torch.Tensor]]</code>, <code>List[List[np.ndarray]]</code> or <code>List[List[PIL.Image.Image]]</code>): | |
| The ControlNet input condition to provide guidance to the <code>unet</code> for generation. If the type is | |
| specified as <code>torch.Tensor</code>, it is passed to ControlNet as is. <code>PIL.Image.Image</code> can also be accepted | |
| as an image. The dimensions of the output image defaults to <code>image</code>’s dimensions. If height and/or | |
| width are passed, <code>image</code> is resized accordingly. If multiple ControlNets are specified in <code>init</code>, | |
| images must be passed as a list such that each element of the list can be correctly batched for input | |
| to a single ControlNet.`,name:"control_image"},{anchor:"diffusers.FluxControlPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxControlPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxControlPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxControlPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxControlPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 3.5) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxControlPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxControlPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxControlPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxControlPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxControlPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxControlPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxControlPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxControlPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L610",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Ie=new ae({props:{anchor:"diffusers.FluxControlPipeline.__call__.example",$$slots:{default:[sp]},$$scope:{ctx:J}}}),hn=new v({props:{name:"disable_vae_slicing",anchor:"diffusers.FluxControlPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L501"}}),_n=new v({props:{name:"disable_vae_tiling",anchor:"diffusers.FluxControlPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L516"}}),bn=new v({props:{name:"enable_vae_slicing",anchor:"diffusers.FluxControlPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L494"}}),yn=new v({props:{name:"enable_vae_tiling",anchor:"diffusers.FluxControlPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L508"}}),Mn=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxControlPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxControlPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control.py#L324"}}),xn=new j({props:{title:"FluxControlImg2ImgPipeline",local:"diffusers.FluxControlImg2ImgPipeline",headingTag:"h2"}}),wn=new v({props:{name:"class diffusers.FluxControlImg2ImgPipeline",anchor:"diffusers.FluxControlImg2ImgPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.FluxControlImg2ImgPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxControlImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxControlImg2ImgPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxControlImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxControlImg2ImgPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxControlImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxControlImg2ImgPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py#L178"}}),Tn=new v({props:{name:"__call__",anchor:"diffusers.FluxControlImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 0.6"},{name:"num_inference_steps",val:": int = 28"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 7.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead`,name:"prompt_2"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code> It can also accept image | |
| latents as <code>image</code>, but if passing latents directly it is not encoded again.`,name:"image"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.control_image",description:`<strong>control_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, — | |
| <code>List[List[torch.Tensor]]</code>, <code>List[List[np.ndarray]]</code> or <code>List[List[PIL.Image.Image]]</code>): | |
| The ControlNet input condition to provide guidance to the <code>unet</code> for generation. If the type is | |
| specified as <code>torch.Tensor</code>, it is passed to ControlNet as is. <code>PIL.Image.Image</code> can also be accepted | |
| as an image. The dimensions of the output image defaults to <code>image</code>’s dimensions. If height and/or | |
| width are passed, <code>image</code> is resized accordingly. If multiple ControlNets are specified in <code>init</code>, | |
| images must be passed as a list such that each element of the list can be correctly batched for input | |
| to a single ControlNet.`,name:"control_image"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 7.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py#L634",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Fe=new ae({props:{anchor:"diffusers.FluxControlImg2ImgPipeline.__call__.example",$$slots:{default:[ap]},$$scope:{ctx:J}}}),In=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxControlImg2ImgPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_control_img2img.py#L335"}}),vn=new j({props:{title:"FluxPriorReduxPipeline",local:"diffusers.FluxPriorReduxPipeline",headingTag:"h2"}}),Jn=new v({props:{name:"class diffusers.FluxPriorReduxPipeline",anchor:"diffusers.FluxPriorReduxPipeline",parameters:[{name:"image_encoder",val:": SiglipVisionModel"},{name:"feature_extractor",val:": SiglipImageProcessor"},{name:"image_embedder",val:": ReduxImageEncoder"},{name:"text_encoder",val:": CLIPTextModel = None"},{name:"tokenizer",val:": CLIPTokenizer = None"},{name:"text_encoder_2",val:": T5EncoderModel = None"},{name:"tokenizer_2",val:": T5TokenizerFast = None"}],parametersDescription:[{anchor:"diffusers.FluxPriorReduxPipeline.image_encoder",description:`<strong>image_encoder</strong> (<code>SiglipVisionModel</code>) — | |
| SIGLIP vision model to encode the input image.`,name:"image_encoder"},{anchor:"diffusers.FluxPriorReduxPipeline.feature_extractor",description:`<strong>feature_extractor</strong> (<code>SiglipImageProcessor</code>) — | |
| Image processor for preprocessing images for the SIGLIP model.`,name:"feature_extractor"},{anchor:"diffusers.FluxPriorReduxPipeline.image_embedder",description:`<strong>image_embedder</strong> (<code>ReduxImageEncoder</code>) — | |
| Redux image encoder to process the SIGLIP embeddings.`,name:"image_embedder"},{anchor:"diffusers.FluxPriorReduxPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>, <em>optional</em>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxPriorReduxPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>, <em>optional</em>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxPriorReduxPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>, <em>optional</em>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxPriorReduxPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>, <em>optional</em>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_prior_redux.py#L84"}}),jn=new v({props:{name:"__call__",anchor:"diffusers.FluxPriorReduxPipeline.__call__",parameters:[{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]]"},{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds_scale",val:": typing.Union[float, typing.List[float], NoneType] = 1.0"},{name:"pooled_prompt_embeds_scale",val:": typing.Union[float, typing.List[float], NoneType] = 1.0"},{name:"return_dict",val:": bool = True"}],parametersDescription:[{anchor:"diffusers.FluxPriorReduxPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code>`,name:"image"},{anchor:"diffusers.FluxPriorReduxPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. <strong>experimental feature</strong>: to use this feature, | |
| make sure to explicitly load text encoders to the pipeline. Prompts will be ignored if text encoders | |
| are not loaded.`,name:"prompt"},{anchor:"diffusers.FluxPriorReduxPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>.`,name:"prompt_2"},{anchor:"diffusers.FluxPriorReduxPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting.`,name:"prompt_embeds"},{anchor:"diffusers.FluxPriorReduxPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxPriorReduxPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPriorReduxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_prior_redux.py#L371",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPriorReduxPipelineOutput</code> if <code>return_dict</code> is True, otherwise a <code>tuple</code>. When | |
| returning a tuple, the first element is a list with the generated images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPriorReduxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),Ze=new ae({props:{anchor:"diffusers.FluxPriorReduxPipeline.__call__.example",$$slots:{default:[lp]},$$scope:{ctx:J}}}),Un=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxPriorReduxPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_prior_redux.py#L292"}}),Fn=new j({props:{title:"FluxFillPipeline",local:"diffusers.FluxFillPipeline",headingTag:"h2"}}),Zn=new v({props:{name:"class diffusers.FluxFillPipeline",anchor:"diffusers.FluxFillPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": CLIPTextModel"},{name:"tokenizer",val:": CLIPTokenizer"},{name:"text_encoder_2",val:": T5EncoderModel"},{name:"tokenizer_2",val:": T5TokenizerFast"},{name:"transformer",val:": FluxTransformer2DModel"}],parametersDescription:[{anchor:"diffusers.FluxFillPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel">FluxTransformer2DModel</a>) — | |
| Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.FluxFillPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_11739/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) — | |
| A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.FluxFillPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_11739/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) — | |
| Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.`,name:"vae"},{anchor:"diffusers.FluxFillPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>CLIPTextModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel" rel="nofollow">CLIP</a>, specifically | |
| the <a href="https://huggingface.co/openai/clip-vit-large-patch14" rel="nofollow">clip-vit-large-patch14</a> variant.`,name:"text_encoder"},{anchor:"diffusers.FluxFillPipeline.text_encoder_2",description:`<strong>text_encoder_2</strong> (<code>T5EncoderModel</code>) — | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically | |
| the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder_2"},{anchor:"diffusers.FluxFillPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>CLIPTokenizer</code>) — | |
| Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer" rel="nofollow">CLIPTokenizer</a>.`,name:"tokenizer"},{anchor:"diffusers.FluxFillPipeline.tokenizer_2",description:`<strong>tokenizer_2</strong> (<code>T5TokenizerFast</code>) — | |
| Second Tokenizer of class | |
| <a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer_2"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L167"}}),kn=new v({props:{name:"__call__",anchor:"diffusers.FluxFillPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"prompt_2",val:": typing.Union[str, typing.List[str], NoneType] = None"},{name:"image",val:": typing.Optional[torch.FloatTensor] = None"},{name:"mask_image",val:": typing.Optional[torch.FloatTensor] = None"},{name:"masked_image_latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"strength",val:": float = 1.0"},{name:"num_inference_steps",val:": int = 50"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 30.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.FloatTensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.FluxFillPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.FluxFillPipeline.__call__.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| will be used instead`,name:"prompt_2"},{anchor:"diffusers.FluxFillPipeline.__call__.image",description:`<strong>image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to be used as the starting point. For both | |
| numpy array and pytorch tensor, the expected value range is between <code>[0, 1]</code> If it’s a tensor or a list | |
| or tensors, the expected shape should be <code>(B, C, H, W)</code> or <code>(C, H, W)</code>. If it is a numpy array or a | |
| list of arrays, the expected shape should be <code>(B, H, W, C)</code> or <code>(H, W, C)</code>.`,name:"image"},{anchor:"diffusers.FluxFillPipeline.__call__.mask_image",description:`<strong>mask_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, or <code>List[np.ndarray]</code>) — | |
| <code>Image</code>, numpy array or tensor representing an image batch to mask <code>image</code>. White pixels in the mask | |
| are repainted while black pixels are preserved. If <code>mask_image</code> is a PIL image, it is converted to a | |
| single channel (luminance) before use. If it’s a numpy array or pytorch tensor, it should contain one | |
| color channel (L) instead of 3, so the expected shape for pytorch tensor would be <code>(B, 1, H, W)</code>, <code>(B, H, W)</code>, <code>(1, H, W)</code>, <code>(H, W)</code>. And for numpy array would be for <code>(B, H, W, 1)</code>, <code>(B, H, W)</code>, <code>(H, W, 1)</code>, or <code>(H, W)</code>.`,name:"mask_image"},{anchor:"diffusers.FluxFillPipeline.__call__.mask_image_latent",description:`<strong>mask_image_latent</strong> (<code>torch.Tensor</code>, <code>List[torch.Tensor]</code>) — | |
| <code>Tensor</code> representing an image batch to mask <code>image</code> generated by VAE. If not provided, the mask | |
| latents tensor will ge generated by <code>mask_image</code>.`,name:"mask_image_latent"},{anchor:"diffusers.FluxFillPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.FluxFillPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) — | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.FluxFillPipeline.__call__.strength",description:`<strong>strength</strong> (<code>float</code>, <em>optional</em>, defaults to 1.0) — | |
| Indicates extent to transform the reference <code>image</code>. Must be between 0 and 1. <code>image</code> is used as a | |
| starting point and more noise is added the higher the <code>strength</code>. The number of denoising steps depends | |
| on the amount of noise initially added. When <code>strength</code> is 1, added noise is maximum and the denoising | |
| process runs for the full number of iterations specified in <code>num_inference_steps</code>. A value of 1 | |
| essentially ignores <code>image</code>.`,name:"strength"},{anchor:"diffusers.FluxFillPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.FluxFillPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.FluxFillPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 30.0) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.FluxFillPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxFillPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.FluxFillPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.FluxFillPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxFillPipeline.__call__.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxFillPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.FluxFillPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.flux.FluxPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.FluxFillPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.FluxFillPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.FluxFillPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.FluxFillPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) — Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L727",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> if <code>return_dict</code> | |
| is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the generated | |
| images.</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><code>~pipelines.flux.FluxPipelineOutput</code> or <code>tuple</code></p> | |
| `}}),ke=new ae({props:{anchor:"diffusers.FluxFillPipeline.__call__.example",$$slots:{default:[rp]},$$scope:{ctx:J}}}),Cn=new v({props:{name:"disable_vae_slicing",anchor:"diffusers.FluxFillPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L638"}}),Wn=new v({props:{name:"disable_vae_tiling",anchor:"diffusers.FluxFillPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L653"}}),Gn=new v({props:{name:"enable_vae_slicing",anchor:"diffusers.FluxFillPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L631"}}),Bn=new v({props:{name:"enable_vae_tiling",anchor:"diffusers.FluxFillPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L645"}}),Nn=new v({props:{name:"encode_prompt",anchor:"diffusers.FluxFillPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"prompt_2",val:": typing.Union[str, typing.List[str]]"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"pooled_prompt_embeds",val:": typing.Optional[torch.FloatTensor] = None"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.FluxFillPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.prompt_2",description:`<strong>prompt_2</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to be sent to the <code>tokenizer_2</code> and <code>text_encoder_2</code>. If not defined, <code>prompt</code> is | |
| used in all text-encoders`,name:"prompt_2"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>): | |
| torch device`,name:"device"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.pooled_prompt_embeds",description:`<strong>pooled_prompt_embeds</strong> (<code>torch.FloatTensor</code>, <em>optional</em>) — | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. | |
| If not provided, pooled text embeddings will be generated from <code>prompt</code> input argument.`,name:"pooled_prompt_embeds"},{anchor:"diffusers.FluxFillPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) — | |
| A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_11739/src/diffusers/pipelines/flux/pipeline_flux_fill.py#L419"}}),Pn=new Hi({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/flux.md"}}),{c(){d=i("meta"),w=o(),y=i("p"),b=o(),c(M.$$.fragment),r=o(),I=i("div"),I.innerHTML=fr,es=o(),Ne=i("p"),Ne.innerHTML=hr,ts=o(),Pe=i("p"),Pe.innerHTML=_r,ns=o(),c(le.$$.fragment),os=o(),$e=i("p"),$e.textContent=br,ss=o(),Le=i("table"),Le.innerHTML=yr,as=o(),Re=i("p"),Re.textContent=Mr,ls=o(),c(Xe.$$.fragment),rs=o(),Ee=i("ul"),Ee.innerHTML=xr,is=o(),c(Ve.$$.fragment),ps=o(),c(Ye.$$.fragment),ds=o(),ze=i("ul"),ze.innerHTML=wr,cs=o(),c(Qe.$$.fragment),ms=o(),c(Ae.$$.fragment),gs=o(),He=i("ul"),He.innerHTML=Tr,us=o(),c(Se.$$.fragment),fs=o(),c(qe.$$.fragment),hs=o(),De=i("p"),De.innerHTML=Ir,_s=o(),c(Oe.$$.fragment),bs=o(),Ke=i("p"),Ke.textContent=vr,ys=o(),c(et.$$.fragment),Ms=o(),c(tt.$$.fragment),xs=o(),nt=i("p"),nt.innerHTML=Jr,ws=o(),c(ot.$$.fragment),Ts=o(),st=i("p"),st.textContent=jr,Is=o(),c(at.$$.fragment),vs=o(),c(lt.$$.fragment),Js=o(),rt=i("ul"),rt.innerHTML=Ur,js=o(),c(it.$$.fragment),Us=o(),c(pt.$$.fragment),Fs=o(),dt=i("p"),dt.innerHTML=Fr,Zs=o(),c(ct.$$.fragment),ks=o(),c(mt.$$.fragment),Cs=o(),gt=i("p"),gt.innerHTML=Zr,Ws=o(),c(ut.$$.fragment),Gs=o(),c(re.$$.fragment),Bs=o(),ft=i("p"),ft.textContent=kr,Ns=o(),c(ht.$$.fragment),Ps=o(),ie=i("div"),ie.innerHTML=Cr,$s=o(),c(_t.$$.fragment),Ls=o(),bt=i("p"),bt.textContent=Wr,Rs=o(),c(yt.$$.fragment),Xs=o(),Mt=i("p"),Mt.innerHTML=Gr,Es=o(),xt=i("p"),xt.innerHTML=Br,Vs=o(),c(pe.$$.fragment),Ys=o(),c(wt.$$.fragment),zs=o(),c(Tt.$$.fragment),Qs=o(),It=i("p"),It.innerHTML=Nr,As=o(),vt=i("p"),vt.textContent=Pr,Hs=o(),c(Jt.$$.fragment),Ss=o(),c(jt.$$.fragment),qs=o(),Ut=i("p"),Ut.textContent=$r,Ds=o(),Ft=i("p"),Ft.innerHTML=Lr,Os=o(),c(Zt.$$.fragment),Ks=o(),c(kt.$$.fragment),ea=o(),Ct=i("p"),Ct.innerHTML=Rr,ta=o(),c(de.$$.fragment),na=o(),Wt=i("p"),Wt.textContent=Xr,oa=o(),Gt=i("p"),Gt.innerHTML=Er,sa=o(),c(Bt.$$.fragment),aa=o(),Nt=i("p"),Nt.textContent=Vr,la=o(),c(Pt.$$.fragment),ra=o(),c($t.$$.fragment),ia=o(),F=i("div"),c(Lt.$$.fragment),Pa=o(),Qn=i("p"),Qn.textContent=Yr,$a=o(),An=i("p"),An.innerHTML=zr,La=o(),V=i("div"),c(Rt.$$.fragment),Ra=o(),Hn=i("p"),Hn.textContent=Qr,Xa=o(),c(ce.$$.fragment),Ea=o(),me=i("div"),c(Xt.$$.fragment),Va=o(),Sn=i("p"),Sn.innerHTML=Ar,Ya=o(),ge=i("div"),c(Et.$$.fragment),za=o(),qn=i("p"),qn.innerHTML=Hr,Qa=o(),ue=i("div"),c(Vt.$$.fragment),Aa=o(),Dn=i("p"),Dn.textContent=Sr,Ha=o(),fe=i("div"),c(Yt.$$.fragment),Sa=o(),On=i("p"),On.textContent=qr,qa=o(),Kn=i("div"),c(zt.$$.fragment),pa=o(),c(Qt.$$.fragment),da=o(),Z=i("div"),c(At.$$.fragment),Da=o(),eo=i("p"),eo.textContent=Dr,Oa=o(),to=i("p"),to.innerHTML=Or,Ka=o(),Y=i("div"),c(Ht.$$.fragment),el=o(),no=i("p"),no.textContent=Kr,tl=o(),c(he.$$.fragment),nl=o(),_e=i("div"),c(St.$$.fragment),ol=o(),oo=i("p"),oo.innerHTML=ei,sl=o(),be=i("div"),c(qt.$$.fragment),al=o(),so=i("p"),so.innerHTML=ti,ll=o(),ye=i("div"),c(Dt.$$.fragment),rl=o(),ao=i("p"),ao.textContent=ni,il=o(),Me=i("div"),c(Ot.$$.fragment),pl=o(),lo=i("p"),lo.textContent=oi,dl=o(),ro=i("div"),c(Kt.$$.fragment),ca=o(),c(en.$$.fragment),ma=o(),P=i("div"),c(tn.$$.fragment),cl=o(),io=i("p"),io.textContent=si,ml=o(),po=i("p"),po.innerHTML=ai,gl=o(),z=i("div"),c(nn.$$.fragment),ul=o(),co=i("p"),co.textContent=li,fl=o(),c(xe.$$.fragment),hl=o(),mo=i("div"),c(on.$$.fragment),ga=o(),c(sn.$$.fragment),ua=o(),$=i("div"),c(an.$$.fragment),_l=o(),go=i("p"),go.textContent=ri,bl=o(),uo=i("p"),uo.innerHTML=ii,yl=o(),Q=i("div"),c(ln.$$.fragment),Ml=o(),fo=i("p"),fo.textContent=pi,xl=o(),c(we.$$.fragment),wl=o(),ho=i("div"),c(rn.$$.fragment),fa=o(),c(pn.$$.fragment),ha=o(),L=i("div"),c(dn.$$.fragment),Tl=o(),_o=i("p"),_o.textContent=di,Il=o(),bo=i("p"),bo.innerHTML=ci,vl=o(),A=i("div"),c(cn.$$.fragment),Jl=o(),yo=i("p"),yo.textContent=mi,jl=o(),c(Te.$$.fragment),Ul=o(),Mo=i("div"),c(mn.$$.fragment),_a=o(),c(gn.$$.fragment),ba=o(),k=i("div"),c(un.$$.fragment),Fl=o(),xo=i("p"),xo.textContent=gi,Zl=o(),wo=i("p"),wo.innerHTML=ui,kl=o(),H=i("div"),c(fn.$$.fragment),Cl=o(),To=i("p"),To.textContent=fi,Wl=o(),c(Ie.$$.fragment),Gl=o(),ve=i("div"),c(hn.$$.fragment),Bl=o(),Io=i("p"),Io.innerHTML=hi,Nl=o(),Je=i("div"),c(_n.$$.fragment),Pl=o(),vo=i("p"),vo.innerHTML=_i,$l=o(),je=i("div"),c(bn.$$.fragment),Ll=o(),Jo=i("p"),Jo.textContent=bi,Rl=o(),Ue=i("div"),c(yn.$$.fragment),Xl=o(),jo=i("p"),jo.textContent=yi,El=o(),Uo=i("div"),c(Mn.$$.fragment),ya=o(),c(xn.$$.fragment),Ma=o(),R=i("div"),c(wn.$$.fragment),Vl=o(),Fo=i("p"),Fo.textContent=Mi,Yl=o(),Zo=i("p"),Zo.innerHTML=xi,zl=o(),S=i("div"),c(Tn.$$.fragment),Ql=o(),ko=i("p"),ko.textContent=wi,Al=o(),c(Fe.$$.fragment),Hl=o(),Co=i("div"),c(In.$$.fragment),xa=o(),c(vn.$$.fragment),wa=o(),X=i("div"),c(Jn.$$.fragment),Sl=o(),Wo=i("p"),Wo.textContent=Ti,ql=o(),Go=i("p"),Go.innerHTML=Ii,Dl=o(),q=i("div"),c(jn.$$.fragment),Ol=o(),Bo=i("p"),Bo.textContent=vi,Kl=o(),c(Ze.$$.fragment),er=o(),No=i("div"),c(Un.$$.fragment),Ta=o(),c(Fn.$$.fragment),Ia=o(),C=i("div"),c(Zn.$$.fragment),tr=o(),Po=i("p"),Po.textContent=Ji,nr=o(),$o=i("p"),$o.innerHTML=ji,or=o(),D=i("div"),c(kn.$$.fragment),sr=o(),Lo=i("p"),Lo.textContent=Ui,ar=o(),c(ke.$$.fragment),lr=o(),Ce=i("div"),c(Cn.$$.fragment),rr=o(),Ro=i("p"),Ro.innerHTML=Fi,ir=o(),We=i("div"),c(Wn.$$.fragment),pr=o(),Xo=i("p"),Xo.innerHTML=Zi,dr=o(),Ge=i("div"),c(Gn.$$.fragment),cr=o(),Eo=i("p"),Eo.textContent=ki,mr=o(),Be=i("div"),c(Bn.$$.fragment),gr=o(),Vo=i("p"),Vo.textContent=Ci,ur=o(),Yo=i("div"),c(Nn.$$.fragment),va=o(),c(Pn.$$.fragment),Ja=o(),Oo=i("p"),this.h()},l(e){const t=zi("svelte-u9bgzb",document.head);d=p(t,"META",{name:!0,content:!0}),t.forEach(n),w=s(e),y=p(e,"P",{}),T(y).forEach(n),b=s(e),m(M.$$.fragment,e),r=s(e),I=p(e,"DIV",{class:!0,"data-svelte-h":!0}),_(I)!=="svelte-1elo7hh"&&(I.innerHTML=fr),es=s(e),Ne=p(e,"P",{"data-svelte-h":!0}),_(Ne)!=="svelte-mlg237"&&(Ne.innerHTML=hr),ts=s(e),Pe=p(e,"P",{"data-svelte-h":!0}),_(Pe)!=="svelte-pdc76o"&&(Pe.innerHTML=_r),ns=s(e),m(le.$$.fragment,e),os=s(e),$e=p(e,"P",{"data-svelte-h":!0}),_($e)!=="svelte-193hkal"&&($e.textContent=br),ss=s(e),Le=p(e,"TABLE",{"data-svelte-h":!0}),_(Le)!=="svelte-1od5eh6"&&(Le.innerHTML=yr),as=s(e),Re=p(e,"P",{"data-svelte-h":!0}),_(Re)!=="svelte-1gsjsi1"&&(Re.textContent=Mr),ls=s(e),m(Xe.$$.fragment,e),rs=s(e),Ee=p(e,"UL",{"data-svelte-h":!0}),_(Ee)!=="svelte-459kcz"&&(Ee.innerHTML=xr),is=s(e),m(Ve.$$.fragment,e),ps=s(e),m(Ye.$$.fragment,e),ds=s(e),ze=p(e,"UL",{"data-svelte-h":!0}),_(ze)!=="svelte-k8komj"&&(ze.innerHTML=wr),cs=s(e),m(Qe.$$.fragment,e),ms=s(e),m(Ae.$$.fragment,e),gs=s(e),He=p(e,"UL",{"data-svelte-h":!0}),_(He)!=="svelte-gprzfd"&&(He.innerHTML=Tr),us=s(e),m(Se.$$.fragment,e),fs=s(e),m(qe.$$.fragment,e),hs=s(e),De=p(e,"P",{"data-svelte-h":!0}),_(De)!=="svelte-p2nvjt"&&(De.innerHTML=Ir),_s=s(e),m(Oe.$$.fragment,e),bs=s(e),Ke=p(e,"P",{"data-svelte-h":!0}),_(Ke)!=="svelte-vqyb98"&&(Ke.textContent=vr),ys=s(e),m(et.$$.fragment,e),Ms=s(e),m(tt.$$.fragment,e),xs=s(e),nt=p(e,"P",{"data-svelte-h":!0}),_(nt)!=="svelte-1pwyq51"&&(nt.innerHTML=Jr),ws=s(e),m(ot.$$.fragment,e),Ts=s(e),st=p(e,"P",{"data-svelte-h":!0}),_(st)!=="svelte-1833u4m"&&(st.textContent=jr),Is=s(e),m(at.$$.fragment,e),vs=s(e),m(lt.$$.fragment,e),Js=s(e),rt=p(e,"UL",{"data-svelte-h":!0}),_(rt)!=="svelte-44b2lm"&&(rt.innerHTML=Ur),js=s(e),m(it.$$.fragment,e),Us=s(e),m(pt.$$.fragment,e),Fs=s(e),dt=p(e,"P",{"data-svelte-h":!0}),_(dt)!=="svelte-d7sbsl"&&(dt.innerHTML=Fr),Zs=s(e),m(ct.$$.fragment,e),ks=s(e),m(mt.$$.fragment,e),Cs=s(e),gt=p(e,"P",{"data-svelte-h":!0}),_(gt)!=="svelte-1vt7dhr"&&(gt.innerHTML=Zr),Ws=s(e),m(ut.$$.fragment,e),Gs=s(e),m(re.$$.fragment,e),Bs=s(e),ft=p(e,"P",{"data-svelte-h":!0}),_(ft)!=="svelte-rmmcs6"&&(ft.textContent=kr),Ns=s(e),m(ht.$$.fragment,e),Ps=s(e),ie=p(e,"DIV",{class:!0,"data-svelte-h":!0}),_(ie)!=="svelte-1utwd2g"&&(ie.innerHTML=Cr),$s=s(e),m(_t.$$.fragment,e),Ls=s(e),bt=p(e,"P",{"data-svelte-h":!0}),_(bt)!=="svelte-sphzvr"&&(bt.textContent=Wr),Rs=s(e),m(yt.$$.fragment,e),Xs=s(e),Mt=p(e,"P",{"data-svelte-h":!0}),_(Mt)!=="svelte-1lx45wx"&&(Mt.innerHTML=Gr),Es=s(e),xt=p(e,"P",{"data-svelte-h":!0}),_(xt)!=="svelte-1ujwu7j"&&(xt.innerHTML=Br),Vs=s(e),m(pe.$$.fragment,e),Ys=s(e),m(wt.$$.fragment,e),zs=s(e),m(Tt.$$.fragment,e),Qs=s(e),It=p(e,"P",{"data-svelte-h":!0}),_(It)!=="svelte-17mig8z"&&(It.innerHTML=Nr),As=s(e),vt=p(e,"P",{"data-svelte-h":!0}),_(vt)!=="svelte-1gjlc3h"&&(vt.textContent=Pr),Hs=s(e),m(Jt.$$.fragment,e),Ss=s(e),m(jt.$$.fragment,e),qs=s(e),Ut=p(e,"P",{"data-svelte-h":!0}),_(Ut)!=="svelte-1ou2pxc"&&(Ut.textContent=$r),Ds=s(e),Ft=p(e,"P",{"data-svelte-h":!0}),_(Ft)!=="svelte-7f5w62"&&(Ft.innerHTML=Lr),Os=s(e),m(Zt.$$.fragment,e),Ks=s(e),m(kt.$$.fragment,e),ea=s(e),Ct=p(e,"P",{"data-svelte-h":!0}),_(Ct)!=="svelte-hittsd"&&(Ct.innerHTML=Rr),ta=s(e),m(de.$$.fragment,e),na=s(e),Wt=p(e,"P",{"data-svelte-h":!0}),_(Wt)!=="svelte-1pyamwr"&&(Wt.textContent=Xr),oa=s(e),Gt=p(e,"P",{"data-svelte-h":!0}),_(Gt)!=="svelte-1ljbc3s"&&(Gt.innerHTML=Er),sa=s(e),m(Bt.$$.fragment,e),aa=s(e),Nt=p(e,"P",{"data-svelte-h":!0}),_(Nt)!=="svelte-15rpvn4"&&(Nt.textContent=Vr),la=s(e),m(Pt.$$.fragment,e),ra=s(e),m($t.$$.fragment,e),ia=s(e),F=p(e,"DIV",{class:!0});var W=T(F);m(Lt.$$.fragment,W),Pa=s(W),Qn=p(W,"P",{"data-svelte-h":!0}),_(Qn)!=="svelte-77uxl4"&&(Qn.textContent=Yr),$a=s(W),An=p(W,"P",{"data-svelte-h":!0}),_(An)!=="svelte-mxgguy"&&(An.innerHTML=zr),La=s(W),V=p(W,"DIV",{class:!0});var oe=T(V);m(Rt.$$.fragment,oe),Ra=s(oe),Hn=p(oe,"P",{"data-svelte-h":!0}),_(Hn)!=="svelte-v78lg8"&&(Hn.textContent=Qr),Xa=s(oe),m(ce.$$.fragment,oe),oe.forEach(n),Ea=s(W),me=p(W,"DIV",{class:!0});var $n=T(me);m(Xt.$$.fragment,$n),Va=s($n),Sn=p($n,"P",{"data-svelte-h":!0}),_(Sn)!=="svelte-1s3c06i"&&(Sn.innerHTML=Ar),$n.forEach(n),Ya=s(W),ge=p(W,"DIV",{class:!0});var Ln=T(ge);m(Et.$$.fragment,Ln),za=s(Ln),qn=p(Ln,"P",{"data-svelte-h":!0}),_(qn)!=="svelte-pkn4ui"&&(qn.innerHTML=Hr),Ln.forEach(n),Qa=s(W),ue=p(W,"DIV",{class:!0});var Rn=T(ue);m(Vt.$$.fragment,Rn),Aa=s(Rn),Dn=p(Rn,"P",{"data-svelte-h":!0}),_(Dn)!=="svelte-14bnrb6"&&(Dn.textContent=Sr),Rn.forEach(n),Ha=s(W),fe=p(W,"DIV",{class:!0});var Xn=T(fe);m(Yt.$$.fragment,Xn),Sa=s(Xn),On=p(Xn,"P",{"data-svelte-h":!0}),_(On)!=="svelte-1xwrf7t"&&(On.textContent=qr),Xn.forEach(n),qa=s(W),Kn=p(W,"DIV",{class:!0});var Ko=T(Kn);m(zt.$$.fragment,Ko),Ko.forEach(n),W.forEach(n),pa=s(e),m(Qt.$$.fragment,e),da=s(e),Z=p(e,"DIV",{class:!0});var G=T(Z);m(At.$$.fragment,G),Da=s(G),eo=p(G,"P",{"data-svelte-h":!0}),_(eo)!=="svelte-9yovg9"&&(eo.textContent=Dr),Oa=s(G),to=p(G,"P",{"data-svelte-h":!0}),_(to)!=="svelte-mxgguy"&&(to.innerHTML=Or),Ka=s(G),Y=p(G,"DIV",{class:!0});var se=T(Y);m(Ht.$$.fragment,se),el=s(se),no=p(se,"P",{"data-svelte-h":!0}),_(no)!=="svelte-v78lg8"&&(no.textContent=Kr),tl=s(se),m(he.$$.fragment,se),se.forEach(n),nl=s(G),_e=p(G,"DIV",{class:!0});var En=T(_e);m(St.$$.fragment,En),ol=s(En),oo=p(En,"P",{"data-svelte-h":!0}),_(oo)!=="svelte-1s3c06i"&&(oo.innerHTML=ei),En.forEach(n),sl=s(G),be=p(G,"DIV",{class:!0});var Vn=T(be);m(qt.$$.fragment,Vn),al=s(Vn),so=p(Vn,"P",{"data-svelte-h":!0}),_(so)!=="svelte-pkn4ui"&&(so.innerHTML=ti),Vn.forEach(n),ll=s(G),ye=p(G,"DIV",{class:!0});var Yn=T(ye);m(Dt.$$.fragment,Yn),rl=s(Yn),ao=p(Yn,"P",{"data-svelte-h":!0}),_(ao)!=="svelte-14bnrb6"&&(ao.textContent=ni),Yn.forEach(n),il=s(G),Me=p(G,"DIV",{class:!0});var zn=T(Me);m(Ot.$$.fragment,zn),pl=s(zn),lo=p(zn,"P",{"data-svelte-h":!0}),_(lo)!=="svelte-1xwrf7t"&&(lo.textContent=oi),zn.forEach(n),dl=s(G),ro=p(G,"DIV",{class:!0});var Wi=T(ro);m(Kt.$$.fragment,Wi),Wi.forEach(n),G.forEach(n),ca=s(e),m(en.$$.fragment,e),ma=s(e),P=p(e,"DIV",{class:!0});var O=T(P);m(tn.$$.fragment,O),cl=s(O),io=p(O,"P",{"data-svelte-h":!0}),_(io)!=="svelte-9yovg9"&&(io.textContent=si),ml=s(O),po=p(O,"P",{"data-svelte-h":!0}),_(po)!=="svelte-mxgguy"&&(po.innerHTML=ai),gl=s(O),z=p(O,"DIV",{class:!0});var zo=T(z);m(nn.$$.fragment,zo),ul=s(zo),co=p(zo,"P",{"data-svelte-h":!0}),_(co)!=="svelte-v78lg8"&&(co.textContent=li),fl=s(zo),m(xe.$$.fragment,zo),zo.forEach(n),hl=s(O),mo=p(O,"DIV",{class:!0});var Gi=T(mo);m(on.$$.fragment,Gi),Gi.forEach(n),O.forEach(n),ga=s(e),m(sn.$$.fragment,e),ua=s(e),$=p(e,"DIV",{class:!0});var K=T($);m(an.$$.fragment,K),_l=s(K),go=p(K,"P",{"data-svelte-h":!0}),_(go)!=="svelte-1q2iwpg"&&(go.textContent=ri),bl=s(K),uo=p(K,"P",{"data-svelte-h":!0}),_(uo)!=="svelte-mxgguy"&&(uo.innerHTML=ii),yl=s(K),Q=p(K,"DIV",{class:!0});var Qo=T(Q);m(ln.$$.fragment,Qo),Ml=s(Qo),fo=p(Qo,"P",{"data-svelte-h":!0}),_(fo)!=="svelte-v78lg8"&&(fo.textContent=pi),xl=s(Qo),m(we.$$.fragment,Qo),Qo.forEach(n),wl=s(K),ho=p(K,"DIV",{class:!0});var Bi=T(ho);m(rn.$$.fragment,Bi),Bi.forEach(n),K.forEach(n),fa=s(e),m(pn.$$.fragment,e),ha=s(e),L=p(e,"DIV",{class:!0});var ee=T(L);m(dn.$$.fragment,ee),Tl=s(ee),_o=p(ee,"P",{"data-svelte-h":!0}),_(_o)!=="svelte-1luolc8"&&(_o.textContent=di),Il=s(ee),bo=p(ee,"P",{"data-svelte-h":!0}),_(bo)!=="svelte-mxgguy"&&(bo.innerHTML=ci),vl=s(ee),A=p(ee,"DIV",{class:!0});var Ao=T(A);m(cn.$$.fragment,Ao),Jl=s(Ao),yo=p(Ao,"P",{"data-svelte-h":!0}),_(yo)!=="svelte-v78lg8"&&(yo.textContent=mi),jl=s(Ao),m(Te.$$.fragment,Ao),Ao.forEach(n),Ul=s(ee),Mo=p(ee,"DIV",{class:!0});var Ni=T(Mo);m(mn.$$.fragment,Ni),Ni.forEach(n),ee.forEach(n),_a=s(e),m(gn.$$.fragment,e),ba=s(e),k=p(e,"DIV",{class:!0});var B=T(k);m(un.$$.fragment,B),Fl=s(B),xo=p(B,"P",{"data-svelte-h":!0}),_(xo)!=="svelte-zx53af"&&(xo.textContent=gi),Zl=s(B),wo=p(B,"P",{"data-svelte-h":!0}),_(wo)!=="svelte-mxgguy"&&(wo.innerHTML=ui),kl=s(B),H=p(B,"DIV",{class:!0});var Ho=T(H);m(fn.$$.fragment,Ho),Cl=s(Ho),To=p(Ho,"P",{"data-svelte-h":!0}),_(To)!=="svelte-v78lg8"&&(To.textContent=fi),Wl=s(Ho),m(Ie.$$.fragment,Ho),Ho.forEach(n),Gl=s(B),ve=p(B,"DIV",{class:!0});var Ua=T(ve);m(hn.$$.fragment,Ua),Bl=s(Ua),Io=p(Ua,"P",{"data-svelte-h":!0}),_(Io)!=="svelte-1s3c06i"&&(Io.innerHTML=hi),Ua.forEach(n),Nl=s(B),Je=p(B,"DIV",{class:!0});var Fa=T(Je);m(_n.$$.fragment,Fa),Pl=s(Fa),vo=p(Fa,"P",{"data-svelte-h":!0}),_(vo)!=="svelte-pkn4ui"&&(vo.innerHTML=_i),Fa.forEach(n),$l=s(B),je=p(B,"DIV",{class:!0});var Za=T(je);m(bn.$$.fragment,Za),Ll=s(Za),Jo=p(Za,"P",{"data-svelte-h":!0}),_(Jo)!=="svelte-14bnrb6"&&(Jo.textContent=bi),Za.forEach(n),Rl=s(B),Ue=p(B,"DIV",{class:!0});var ka=T(Ue);m(yn.$$.fragment,ka),Xl=s(ka),jo=p(ka,"P",{"data-svelte-h":!0}),_(jo)!=="svelte-1xwrf7t"&&(jo.textContent=yi),ka.forEach(n),El=s(B),Uo=p(B,"DIV",{class:!0});var Pi=T(Uo);m(Mn.$$.fragment,Pi),Pi.forEach(n),B.forEach(n),ya=s(e),m(xn.$$.fragment,e),Ma=s(e),R=p(e,"DIV",{class:!0});var te=T(R);m(wn.$$.fragment,te),Vl=s(te),Fo=p(te,"P",{"data-svelte-h":!0}),_(Fo)!=="svelte-9yovg9"&&(Fo.textContent=Mi),Yl=s(te),Zo=p(te,"P",{"data-svelte-h":!0}),_(Zo)!=="svelte-mxgguy"&&(Zo.innerHTML=xi),zl=s(te),S=p(te,"DIV",{class:!0});var So=T(S);m(Tn.$$.fragment,So),Ql=s(So),ko=p(So,"P",{"data-svelte-h":!0}),_(ko)!=="svelte-v78lg8"&&(ko.textContent=wi),Al=s(So),m(Fe.$$.fragment,So),So.forEach(n),Hl=s(te),Co=p(te,"DIV",{class:!0});var $i=T(Co);m(In.$$.fragment,$i),$i.forEach(n),te.forEach(n),xa=s(e),m(vn.$$.fragment,e),wa=s(e),X=p(e,"DIV",{class:!0});var ne=T(X);m(Jn.$$.fragment,ne),Sl=s(ne),Wo=p(ne,"P",{"data-svelte-h":!0}),_(Wo)!=="svelte-p02kn2"&&(Wo.textContent=Ti),ql=s(ne),Go=p(ne,"P",{"data-svelte-h":!0}),_(Go)!=="svelte-12zhw7u"&&(Go.innerHTML=Ii),Dl=s(ne),q=p(ne,"DIV",{class:!0});var qo=T(q);m(jn.$$.fragment,qo),Ol=s(qo),Bo=p(qo,"P",{"data-svelte-h":!0}),_(Bo)!=="svelte-v78lg8"&&(Bo.textContent=vi),Kl=s(qo),m(Ze.$$.fragment,qo),qo.forEach(n),er=s(ne),No=p(ne,"DIV",{class:!0});var Li=T(No);m(Un.$$.fragment,Li),Li.forEach(n),ne.forEach(n),Ta=s(e),m(Fn.$$.fragment,e),Ia=s(e),C=p(e,"DIV",{class:!0});var N=T(C);m(Zn.$$.fragment,N),tr=s(N),Po=p(N,"P",{"data-svelte-h":!0}),_(Po)!=="svelte-1d13as9"&&(Po.textContent=Ji),nr=s(N),$o=p(N,"P",{"data-svelte-h":!0}),_($o)!=="svelte-12zhw7u"&&($o.innerHTML=ji),or=s(N),D=p(N,"DIV",{class:!0});var Do=T(D);m(kn.$$.fragment,Do),sr=s(Do),Lo=p(Do,"P",{"data-svelte-h":!0}),_(Lo)!=="svelte-v78lg8"&&(Lo.textContent=Ui),ar=s(Do),m(ke.$$.fragment,Do),Do.forEach(n),lr=s(N),Ce=p(N,"DIV",{class:!0});var Ca=T(Ce);m(Cn.$$.fragment,Ca),rr=s(Ca),Ro=p(Ca,"P",{"data-svelte-h":!0}),_(Ro)!=="svelte-1s3c06i"&&(Ro.innerHTML=Fi),Ca.forEach(n),ir=s(N),We=p(N,"DIV",{class:!0});var Wa=T(We);m(Wn.$$.fragment,Wa),pr=s(Wa),Xo=p(Wa,"P",{"data-svelte-h":!0}),_(Xo)!=="svelte-pkn4ui"&&(Xo.innerHTML=Zi),Wa.forEach(n),dr=s(N),Ge=p(N,"DIV",{class:!0});var Ga=T(Ge);m(Gn.$$.fragment,Ga),cr=s(Ga),Eo=p(Ga,"P",{"data-svelte-h":!0}),_(Eo)!=="svelte-14bnrb6"&&(Eo.textContent=ki),Ga.forEach(n),mr=s(N),Be=p(N,"DIV",{class:!0});var Ba=T(Be);m(Bn.$$.fragment,Ba),gr=s(Ba),Vo=p(Ba,"P",{"data-svelte-h":!0}),_(Vo)!=="svelte-1xwrf7t"&&(Vo.textContent=Ci),Ba.forEach(n),ur=s(N),Yo=p(N,"DIV",{class:!0});var Ri=T(Yo);m(Nn.$$.fragment,Ri),Ri.forEach(n),N.forEach(n),va=s(e),m(Pn.$$.fragment,e),Ja=s(e),Oo=p(e,"P",{}),T(Oo).forEach(n),this.h()},h(){x(d,"name","hf:doc:metadata"),x(d,"content",pp),x(I,"class","flex flex-wrap space-x-1"),x(ie,"class","justify-center"),x(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(fe,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Kn,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(F,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(_e,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ye,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Me,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ro,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(mo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ho,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x($,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(A,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Mo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(ve,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Ue,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Uo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(S,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Co,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(R,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(q,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(No,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Ce,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(We,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Ge,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(Yo,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),x(C,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,t){a(document.head,d),l(e,w,t),l(e,y,t),l(e,b,t),g(M,e,t),l(e,r,t),l(e,I,t),l(e,es,t),l(e,Ne,t),l(e,ts,t),l(e,Pe,t),l(e,ns,t),g(le,e,t),l(e,os,t),l(e,$e,t),l(e,ss,t),l(e,Le,t),l(e,as,t),l(e,Re,t),l(e,ls,t),g(Xe,e,t),l(e,rs,t),l(e,Ee,t),l(e,is,t),g(Ve,e,t),l(e,ps,t),g(Ye,e,t),l(e,ds,t),l(e,ze,t),l(e,cs,t),g(Qe,e,t),l(e,ms,t),g(Ae,e,t),l(e,gs,t),l(e,He,t),l(e,us,t),g(Se,e,t),l(e,fs,t),g(qe,e,t),l(e,hs,t),l(e,De,t),l(e,_s,t),g(Oe,e,t),l(e,bs,t),l(e,Ke,t),l(e,ys,t),g(et,e,t),l(e,Ms,t),g(tt,e,t),l(e,xs,t),l(e,nt,t),l(e,ws,t),g(ot,e,t),l(e,Ts,t),l(e,st,t),l(e,Is,t),g(at,e,t),l(e,vs,t),g(lt,e,t),l(e,Js,t),l(e,rt,t),l(e,js,t),g(it,e,t),l(e,Us,t),g(pt,e,t),l(e,Fs,t),l(e,dt,t),l(e,Zs,t),g(ct,e,t),l(e,ks,t),g(mt,e,t),l(e,Cs,t),l(e,gt,t),l(e,Ws,t),g(ut,e,t),l(e,Gs,t),g(re,e,t),l(e,Bs,t),l(e,ft,t),l(e,Ns,t),g(ht,e,t),l(e,Ps,t),l(e,ie,t),l(e,$s,t),g(_t,e,t),l(e,Ls,t),l(e,bt,t),l(e,Rs,t),g(yt,e,t),l(e,Xs,t),l(e,Mt,t),l(e,Es,t),l(e,xt,t),l(e,Vs,t),g(pe,e,t),l(e,Ys,t),g(wt,e,t),l(e,zs,t),g(Tt,e,t),l(e,Qs,t),l(e,It,t),l(e,As,t),l(e,vt,t),l(e,Hs,t),g(Jt,e,t),l(e,Ss,t),g(jt,e,t),l(e,qs,t),l(e,Ut,t),l(e,Ds,t),l(e,Ft,t),l(e,Os,t),g(Zt,e,t),l(e,Ks,t),g(kt,e,t),l(e,ea,t),l(e,Ct,t),l(e,ta,t),g(de,e,t),l(e,na,t),l(e,Wt,t),l(e,oa,t),l(e,Gt,t),l(e,sa,t),g(Bt,e,t),l(e,aa,t),l(e,Nt,t),l(e,la,t),g(Pt,e,t),l(e,ra,t),g($t,e,t),l(e,ia,t),l(e,F,t),g(Lt,F,null),a(F,Pa),a(F,Qn),a(F,$a),a(F,An),a(F,La),a(F,V),g(Rt,V,null),a(V,Ra),a(V,Hn),a(V,Xa),g(ce,V,null),a(F,Ea),a(F,me),g(Xt,me,null),a(me,Va),a(me,Sn),a(F,Ya),a(F,ge),g(Et,ge,null),a(ge,za),a(ge,qn),a(F,Qa),a(F,ue),g(Vt,ue,null),a(ue,Aa),a(ue,Dn),a(F,Ha),a(F,fe),g(Yt,fe,null),a(fe,Sa),a(fe,On),a(F,qa),a(F,Kn),g(zt,Kn,null),l(e,pa,t),g(Qt,e,t),l(e,da,t),l(e,Z,t),g(At,Z,null),a(Z,Da),a(Z,eo),a(Z,Oa),a(Z,to),a(Z,Ka),a(Z,Y),g(Ht,Y,null),a(Y,el),a(Y,no),a(Y,tl),g(he,Y,null),a(Z,nl),a(Z,_e),g(St,_e,null),a(_e,ol),a(_e,oo),a(Z,sl),a(Z,be),g(qt,be,null),a(be,al),a(be,so),a(Z,ll),a(Z,ye),g(Dt,ye,null),a(ye,rl),a(ye,ao),a(Z,il),a(Z,Me),g(Ot,Me,null),a(Me,pl),a(Me,lo),a(Z,dl),a(Z,ro),g(Kt,ro,null),l(e,ca,t),g(en,e,t),l(e,ma,t),l(e,P,t),g(tn,P,null),a(P,cl),a(P,io),a(P,ml),a(P,po),a(P,gl),a(P,z),g(nn,z,null),a(z,ul),a(z,co),a(z,fl),g(xe,z,null),a(P,hl),a(P,mo),g(on,mo,null),l(e,ga,t),g(sn,e,t),l(e,ua,t),l(e,$,t),g(an,$,null),a($,_l),a($,go),a($,bl),a($,uo),a($,yl),a($,Q),g(ln,Q,null),a(Q,Ml),a(Q,fo),a(Q,xl),g(we,Q,null),a($,wl),a($,ho),g(rn,ho,null),l(e,fa,t),g(pn,e,t),l(e,ha,t),l(e,L,t),g(dn,L,null),a(L,Tl),a(L,_o),a(L,Il),a(L,bo),a(L,vl),a(L,A),g(cn,A,null),a(A,Jl),a(A,yo),a(A,jl),g(Te,A,null),a(L,Ul),a(L,Mo),g(mn,Mo,null),l(e,_a,t),g(gn,e,t),l(e,ba,t),l(e,k,t),g(un,k,null),a(k,Fl),a(k,xo),a(k,Zl),a(k,wo),a(k,kl),a(k,H),g(fn,H,null),a(H,Cl),a(H,To),a(H,Wl),g(Ie,H,null),a(k,Gl),a(k,ve),g(hn,ve,null),a(ve,Bl),a(ve,Io),a(k,Nl),a(k,Je),g(_n,Je,null),a(Je,Pl),a(Je,vo),a(k,$l),a(k,je),g(bn,je,null),a(je,Ll),a(je,Jo),a(k,Rl),a(k,Ue),g(yn,Ue,null),a(Ue,Xl),a(Ue,jo),a(k,El),a(k,Uo),g(Mn,Uo,null),l(e,ya,t),g(xn,e,t),l(e,Ma,t),l(e,R,t),g(wn,R,null),a(R,Vl),a(R,Fo),a(R,Yl),a(R,Zo),a(R,zl),a(R,S),g(Tn,S,null),a(S,Ql),a(S,ko),a(S,Al),g(Fe,S,null),a(R,Hl),a(R,Co),g(In,Co,null),l(e,xa,t),g(vn,e,t),l(e,wa,t),l(e,X,t),g(Jn,X,null),a(X,Sl),a(X,Wo),a(X,ql),a(X,Go),a(X,Dl),a(X,q),g(jn,q,null),a(q,Ol),a(q,Bo),a(q,Kl),g(Ze,q,null),a(X,er),a(X,No),g(Un,No,null),l(e,Ta,t),g(Fn,e,t),l(e,Ia,t),l(e,C,t),g(Zn,C,null),a(C,tr),a(C,Po),a(C,nr),a(C,$o),a(C,or),a(C,D),g(kn,D,null),a(D,sr),a(D,Lo),a(D,ar),g(ke,D,null),a(C,lr),a(C,Ce),g(Cn,Ce,null),a(Ce,rr),a(Ce,Ro),a(C,ir),a(C,We),g(Wn,We,null),a(We,pr),a(We,Xo),a(C,dr),a(C,Ge),g(Gn,Ge,null),a(Ge,cr),a(Ge,Eo),a(C,mr),a(C,Be),g(Bn,Be,null),a(Be,gr),a(Be,Vo),a(C,ur),a(C,Yo),g(Nn,Yo,null),l(e,va,t),g(Pn,e,t),l(e,Ja,t),l(e,Oo,t),ja=!0},p(e,[t]){const W={};t&2&&(W.$$scope={dirty:t,ctx:e}),le.$set(W);const oe={};t&2&&(oe.$$scope={dirty:t,ctx:e}),re.$set(oe);const $n={};t&2&&($n.$$scope={dirty:t,ctx:e}),pe.$set($n);const Ln={};t&2&&(Ln.$$scope={dirty:t,ctx:e}),de.$set(Ln);const Rn={};t&2&&(Rn.$$scope={dirty:t,ctx:e}),ce.$set(Rn);const Xn={};t&2&&(Xn.$$scope={dirty:t,ctx:e}),he.$set(Xn);const Ko={};t&2&&(Ko.$$scope={dirty:t,ctx:e}),xe.$set(Ko);const G={};t&2&&(G.$$scope={dirty:t,ctx:e}),we.$set(G);const se={};t&2&&(se.$$scope={dirty:t,ctx:e}),Te.$set(se);const En={};t&2&&(En.$$scope={dirty:t,ctx:e}),Ie.$set(En);const Vn={};t&2&&(Vn.$$scope={dirty:t,ctx:e}),Fe.$set(Vn);const Yn={};t&2&&(Yn.$$scope={dirty:t,ctx:e}),Ze.$set(Yn);const zn={};t&2&&(zn.$$scope={dirty:t,ctx:e}),ke.$set(zn)},i(e){ja||(u(M.$$.fragment,e),u(le.$$.fragment,e),u(Xe.$$.fragment,e),u(Ve.$$.fragment,e),u(Ye.$$.fragment,e),u(Qe.$$.fragment,e),u(Ae.$$.fragment,e),u(Se.$$.fragment,e),u(qe.$$.fragment,e),u(Oe.$$.fragment,e),u(et.$$.fragment,e),u(tt.$$.fragment,e),u(ot.$$.fragment,e),u(at.$$.fragment,e),u(lt.$$.fragment,e),u(it.$$.fragment,e),u(pt.$$.fragment,e),u(ct.$$.fragment,e),u(mt.$$.fragment,e),u(ut.$$.fragment,e),u(re.$$.fragment,e),u(ht.$$.fragment,e),u(_t.$$.fragment,e),u(yt.$$.fragment,e),u(pe.$$.fragment,e),u(wt.$$.fragment,e),u(Tt.$$.fragment,e),u(Jt.$$.fragment,e),u(jt.$$.fragment,e),u(Zt.$$.fragment,e),u(kt.$$.fragment,e),u(de.$$.fragment,e),u(Bt.$$.fragment,e),u(Pt.$$.fragment,e),u($t.$$.fragment,e),u(Lt.$$.fragment,e),u(Rt.$$.fragment,e),u(ce.$$.fragment,e),u(Xt.$$.fragment,e),u(Et.$$.fragment,e),u(Vt.$$.fragment,e),u(Yt.$$.fragment,e),u(zt.$$.fragment,e),u(Qt.$$.fragment,e),u(At.$$.fragment,e),u(Ht.$$.fragment,e),u(he.$$.fragment,e),u(St.$$.fragment,e),u(qt.$$.fragment,e),u(Dt.$$.fragment,e),u(Ot.$$.fragment,e),u(Kt.$$.fragment,e),u(en.$$.fragment,e),u(tn.$$.fragment,e),u(nn.$$.fragment,e),u(xe.$$.fragment,e),u(on.$$.fragment,e),u(sn.$$.fragment,e),u(an.$$.fragment,e),u(ln.$$.fragment,e),u(we.$$.fragment,e),u(rn.$$.fragment,e),u(pn.$$.fragment,e),u(dn.$$.fragment,e),u(cn.$$.fragment,e),u(Te.$$.fragment,e),u(mn.$$.fragment,e),u(gn.$$.fragment,e),u(un.$$.fragment,e),u(fn.$$.fragment,e),u(Ie.$$.fragment,e),u(hn.$$.fragment,e),u(_n.$$.fragment,e),u(bn.$$.fragment,e),u(yn.$$.fragment,e),u(Mn.$$.fragment,e),u(xn.$$.fragment,e),u(wn.$$.fragment,e),u(Tn.$$.fragment,e),u(Fe.$$.fragment,e),u(In.$$.fragment,e),u(vn.$$.fragment,e),u(Jn.$$.fragment,e),u(jn.$$.fragment,e),u(Ze.$$.fragment,e),u(Un.$$.fragment,e),u(Fn.$$.fragment,e),u(Zn.$$.fragment,e),u(kn.$$.fragment,e),u(ke.$$.fragment,e),u(Cn.$$.fragment,e),u(Wn.$$.fragment,e),u(Gn.$$.fragment,e),u(Bn.$$.fragment,e),u(Nn.$$.fragment,e),u(Pn.$$.fragment,e),ja=!0)},o(e){f(M.$$.fragment,e),f(le.$$.fragment,e),f(Xe.$$.fragment,e),f(Ve.$$.fragment,e),f(Ye.$$.fragment,e),f(Qe.$$.fragment,e),f(Ae.$$.fragment,e),f(Se.$$.fragment,e),f(qe.$$.fragment,e),f(Oe.$$.fragment,e),f(et.$$.fragment,e),f(tt.$$.fragment,e),f(ot.$$.fragment,e),f(at.$$.fragment,e),f(lt.$$.fragment,e),f(it.$$.fragment,e),f(pt.$$.fragment,e),f(ct.$$.fragment,e),f(mt.$$.fragment,e),f(ut.$$.fragment,e),f(re.$$.fragment,e),f(ht.$$.fragment,e),f(_t.$$.fragment,e),f(yt.$$.fragment,e),f(pe.$$.fragment,e),f(wt.$$.fragment,e),f(Tt.$$.fragment,e),f(Jt.$$.fragment,e),f(jt.$$.fragment,e),f(Zt.$$.fragment,e),f(kt.$$.fragment,e),f(de.$$.fragment,e),f(Bt.$$.fragment,e),f(Pt.$$.fragment,e),f($t.$$.fragment,e),f(Lt.$$.fragment,e),f(Rt.$$.fragment,e),f(ce.$$.fragment,e),f(Xt.$$.fragment,e),f(Et.$$.fragment,e),f(Vt.$$.fragment,e),f(Yt.$$.fragment,e),f(zt.$$.fragment,e),f(Qt.$$.fragment,e),f(At.$$.fragment,e),f(Ht.$$.fragment,e),f(he.$$.fragment,e),f(St.$$.fragment,e),f(qt.$$.fragment,e),f(Dt.$$.fragment,e),f(Ot.$$.fragment,e),f(Kt.$$.fragment,e),f(en.$$.fragment,e),f(tn.$$.fragment,e),f(nn.$$.fragment,e),f(xe.$$.fragment,e),f(on.$$.fragment,e),f(sn.$$.fragment,e),f(an.$$.fragment,e),f(ln.$$.fragment,e),f(we.$$.fragment,e),f(rn.$$.fragment,e),f(pn.$$.fragment,e),f(dn.$$.fragment,e),f(cn.$$.fragment,e),f(Te.$$.fragment,e),f(mn.$$.fragment,e),f(gn.$$.fragment,e),f(un.$$.fragment,e),f(fn.$$.fragment,e),f(Ie.$$.fragment,e),f(hn.$$.fragment,e),f(_n.$$.fragment,e),f(bn.$$.fragment,e),f(yn.$$.fragment,e),f(Mn.$$.fragment,e),f(xn.$$.fragment,e),f(wn.$$.fragment,e),f(Tn.$$.fragment,e),f(Fe.$$.fragment,e),f(In.$$.fragment,e),f(vn.$$.fragment,e),f(Jn.$$.fragment,e),f(jn.$$.fragment,e),f(Ze.$$.fragment,e),f(Un.$$.fragment,e),f(Fn.$$.fragment,e),f(Zn.$$.fragment,e),f(kn.$$.fragment,e),f(ke.$$.fragment,e),f(Cn.$$.fragment,e),f(Wn.$$.fragment,e),f(Gn.$$.fragment,e),f(Bn.$$.fragment,e),f(Nn.$$.fragment,e),f(Pn.$$.fragment,e),ja=!1},d(e){e&&(n(w),n(y),n(b),n(r),n(I),n(es),n(Ne),n(ts),n(Pe),n(ns),n(os),n($e),n(ss),n(Le),n(as),n(Re),n(ls),n(rs),n(Ee),n(is),n(ps),n(ds),n(ze),n(cs),n(ms),n(gs),n(He),n(us),n(fs),n(hs),n(De),n(_s),n(bs),n(Ke),n(ys),n(Ms),n(xs),n(nt),n(ws),n(Ts),n(st),n(Is),n(vs),n(Js),n(rt),n(js),n(Us),n(Fs),n(dt),n(Zs),n(ks),n(Cs),n(gt),n(Ws),n(Gs),n(Bs),n(ft),n(Ns),n(Ps),n(ie),n($s),n(Ls),n(bt),n(Rs),n(Xs),n(Mt),n(Es),n(xt),n(Vs),n(Ys),n(zs),n(Qs),n(It),n(As),n(vt),n(Hs),n(Ss),n(qs),n(Ut),n(Ds),n(Ft),n(Os),n(Ks),n(ea),n(Ct),n(ta),n(na),n(Wt),n(oa),n(Gt),n(sa),n(aa),n(Nt),n(la),n(ra),n(ia),n(F),n(pa),n(da),n(Z),n(ca),n(ma),n(P),n(ga),n(ua),n($),n(fa),n(ha),n(L),n(_a),n(ba),n(k),n(ya),n(Ma),n(R),n(xa),n(wa),n(X),n(Ta),n(Ia),n(C),n(va),n(Ja),n(Oo)),n(d),h(M,e),h(le,e),h(Xe,e),h(Ve,e),h(Ye,e),h(Qe,e),h(Ae,e),h(Se,e),h(qe,e),h(Oe,e),h(et,e),h(tt,e),h(ot,e),h(at,e),h(lt,e),h(it,e),h(pt,e),h(ct,e),h(mt,e),h(ut,e),h(re,e),h(ht,e),h(_t,e),h(yt,e),h(pe,e),h(wt,e),h(Tt,e),h(Jt,e),h(jt,e),h(Zt,e),h(kt,e),h(de,e),h(Bt,e),h(Pt,e),h($t,e),h(Lt),h(Rt),h(ce),h(Xt),h(Et),h(Vt),h(Yt),h(zt),h(Qt,e),h(At),h(Ht),h(he),h(St),h(qt),h(Dt),h(Ot),h(Kt),h(en,e),h(tn),h(nn),h(xe),h(on),h(sn,e),h(an),h(ln),h(we),h(rn),h(pn,e),h(dn),h(cn),h(Te),h(mn),h(gn,e),h(un),h(fn),h(Ie),h(hn),h(_n),h(bn),h(yn),h(Mn),h(xn,e),h(wn),h(Tn),h(Fe),h(In),h(vn,e),h(Jn),h(jn),h(Ze),h(Un),h(Fn,e),h(Zn),h(kn),h(ke),h(Cn),h(Wn),h(Gn),h(Bn),h(Nn),h(Pn,e)}}}const pp='{"title":"Flux","local":"flux","sections":[{"title":"Timestep-distilled","local":"timestep-distilled","sections":[],"depth":3},{"title":"Guidance-distilled","local":"guidance-distilled","sections":[],"depth":3},{"title":"Fill Inpainting/Outpainting","local":"fill-inpaintingoutpainting","sections":[],"depth":3},{"title":"Canny Control","local":"canny-control","sections":[],"depth":3},{"title":"Depth Control","local":"depth-control","sections":[],"depth":3},{"title":"Redux","local":"redux","sections":[],"depth":3},{"title":"Combining Flux Turbo LoRAs with Flux Control, Fill, and Redux","local":"combining-flux-turbo-loras-with-flux-control-fill-and-redux","sections":[],"depth":2},{"title":"Note about unload_lora_weights() when using Flux LoRAs","local":"note-about-unloadloraweights-when-using-flux-loras","sections":[],"depth":2},{"title":"IP-Adapter","local":"ip-adapter","sections":[],"depth":2},{"title":"Optimize","local":"optimize","sections":[{"title":"Group offloading","local":"group-offloading","sections":[],"depth":3},{"title":"Running FP16 inference","local":"running-fp16-inference","sections":[],"depth":3},{"title":"Quantization","local":"quantization","sections":[],"depth":3}],"depth":2},{"title":"Single File Loading for the FluxTransformer2DModel","local":"single-file-loading-for-the-fluxtransformer2dmodel","sections":[],"depth":2},{"title":"FluxPipeline","local":"diffusers.FluxPipeline","sections":[],"depth":2},{"title":"FluxImg2ImgPipeline","local":"diffusers.FluxImg2ImgPipeline","sections":[],"depth":2},{"title":"FluxInpaintPipeline","local":"diffusers.FluxInpaintPipeline","sections":[],"depth":2},{"title":"FluxControlNetInpaintPipeline","local":"diffusers.FluxControlNetInpaintPipeline","sections":[],"depth":2},{"title":"FluxControlNetImg2ImgPipeline","local":"diffusers.FluxControlNetImg2ImgPipeline","sections":[],"depth":2},{"title":"FluxControlPipeline","local":"diffusers.FluxControlPipeline","sections":[],"depth":2},{"title":"FluxControlImg2ImgPipeline","local":"diffusers.FluxControlImg2ImgPipeline","sections":[],"depth":2},{"title":"FluxPriorReduxPipeline","local":"diffusers.FluxPriorReduxPipeline","sections":[],"depth":2},{"title":"FluxFillPipeline","local":"diffusers.FluxFillPipeline","sections":[],"depth":2}],"depth":1}';function dp(J){return Ei(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class bp extends Vi{constructor(d){super(),Yi(this,d,dp,ip,Xi,{})}}export{bp as component}; | |
Xet Storage Details
- Size:
- 301 kB
- Xet hash:
- 9ac56f66992d2fc353aa262aebbb0b6f1e8e4418c67dea309112590d3508955e
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.