# FluxControlInpaint

FluxControlInpaintPipeline implements inpainting for the Flux.1 Depth/Canny models. Given a source image, a mask, and a structural control image (a depth map or Canny edge map), it repaints the masked region and returns the inpainted image.

FLUX.1 Depth and Canny [dev] is a 12 billion parameter rectified flow transformer capable of generating an image based on a text description while following the structure of a given input image. **This is not a ControlNet model**.

| Control type | Developer | Link |
| ------------ | --------- | ---- |
| Depth | [Black Forest Labs](https://huggingface.co/black-forest-labs) | [Link](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev) |
| Canny | [Black Forest Labs](https://huggingface.co/black-forest-labs) | [Link](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev) |

> [!TIP]
> Flux can be quite expensive to run on consumer hardware. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).
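As a concrete route into the quantization mentioned in the tip, here is a minimal sketch. It assumes a recent diffusers build with bitsandbytes support and the optional `bitsandbytes` package installed; the 12B transformer is loaded in 4-bit NF4 before being handed to the pipeline:

```python
import torch
from diffusers import BitsAndBytesConfig, FluxControlInpaintPipeline
from diffusers.models.transformers import FluxTransformer2DModel

# quantize the memory-dominant transformer to 4-bit NF4
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
pipe = FluxControlInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()
```

The full example below reaches a similar footprint by loading a pre-quantized NF4 checkpoint instead.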
The example below inpaints the robot's head region with a new subject while following the depth map of the source image:

```python
import torch
from diffusers import FluxControlInpaintPipeline
from diffusers.models.transformers import FluxTransformer2DModel
from transformers import T5EncoderModel
from diffusers.utils import load_image, make_image_grid
from image_gen_aux import DepthPreprocessor  # https://github.com/huggingface/image_gen_aux
from PIL import Image
import numpy as np

pipe = FluxControlInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev",
    torch_dtype=torch.bfloat16,
)
# use the following lines if you have GPU constraints
# ---------------------------------------------------------------
transformer = FluxTransformer2DModel.from_pretrained(
    "sayakpaul/FLUX.1-Depth-dev-nf4", subfolder="transformer", torch_dtype=torch.bfloat16
)
text_encoder_2 = T5EncoderModel.from_pretrained(
    "sayakpaul/FLUX.1-Depth-dev-nf4", subfolder="text_encoder_2", torch_dtype=torch.bfloat16
)
pipe.transformer = transformer
pipe.text_encoder_2 = text_encoder_2
pipe.enable_model_cpu_offload()
# ---------------------------------------------------------------
# skip this line if you enabled model CPU offloading above; the two are mutually exclusive
pipe.to("cuda")

prompt = "a blue robot singing opera with human-like expressions"
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")

# white (255) marks the region to repaint; here, a box over the robot's head
head_mask = np.zeros_like(image)
head_mask[65:580, 300:642] = 255
mask_image = Image.fromarray(head_mask)

processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(image)[0].convert("RGB")

output = pipe(
    prompt=prompt,
    image=image,
    control_image=control_image,
    mask_image=mask_image,
    num_inference_steps=30,
    strength=0.9,
    guidance_scale=10.0,
    generator=torch.Generator().manual_seed(42),
).images[0]
make_image_grid([image, control_image, mask_image, output.resize(image.size)], rows=1, cols=4).save(
    "output.png"
)
```
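The Depth checkpoint swaps out directly for the Canny one. A minimal sketch, assuming `controlnet_aux` and its `CannyDetector` are installed to produce the edge map (any Canny edge image works):

```python
import numpy as np
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlInpaintPipeline
from diffusers.utils import load_image
from PIL import Image

pipe = FluxControlInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Canny-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")

# same head mask as in the Depth example
head_mask = np.zeros_like(image)
head_mask[65:580, 300:642] = 255
mask_image = Image.fromarray(head_mask)

# an edge map now plays the role the depth map played above
control_image = CannyDetector()(image, low_threshold=50, high_threshold=200)

output = pipe(
    prompt="a blue robot singing opera with human-like expressions",
    image=image,
    control_image=control_image,
    mask_image=mask_image,
    num_inference_steps=30,
    strength=0.9,
    guidance_scale=10.0,
    generator=torch.Generator().manual_seed(42),
).images[0]
output.save("output_canny.png")
```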
## FluxControlInpaintPipeline

`class diffusers.FluxControlInpaintPipeline(scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel)`

The Flux pipeline for image inpainting using Flux-dev-Depth/Canny. The pipeline supports loading LoRA weights.

Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
**Parameters:**

- **scheduler** ([FlowMatchEulerDiscreteScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler)) — A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- **vae** ([AutoencoderKL](https://huggingface.co/docs/diffusers/main/en/api/models/autoencoderkl#diffusers.AutoencoderKL)) — Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
- **text_encoder** (`CLIPTextModel`) — [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- **tokenizer** (`CLIPTokenizer`) — Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
- **text_encoder_2** (`T5EncoderModel`) — [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
- **tokenizer_2** (`T5TokenizerFast`) — Second tokenizer, of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
- **transformer** ([FluxTransformer2DModel](https://huggingface.co/docs/diffusers/main/en/api/models/flux_transformer#diffusers.FluxTransformer2DModel)) — Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.

### `__call__`

`__call__(prompt=None, prompt_2=None, image=None, control_image=None, mask_image=None, masked_image_latents=None, height=None, width=None, strength=0.6, num_inference_steps=28, sigmas=None, guidance_scale=7.0, num_images_per_prompt=1, generator=None, latents=None, prompt_embeds=None, pooled_prompt_embeds=None, output_type="pil", return_dict=True, joint_attention_kwargs=None, callback_on_step_end=None, callback_on_step_end_tensor_inputs=["latents"], max_sequence_length=512)`

Function invoked when calling the pipeline for generation.
**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — The prompt or prompts to guide the image generation. If not defined, `prompt_embeds` must be passed instead.
- **prompt_2** (`str` or `List[str]`, *optional*) — The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will be used instead.
- **image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy arrays and pytorch tensors, the expected value range is between `[0, 1]`. If it's a tensor or a list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image latents as `image`, but if passing latents directly they are not encoded again.
- **control_image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — The control input (e.g., a depth map or Canny edge map) that provides structural guidance to the transformer during generation. If the type is specified as `torch.Tensor`, it is passed as is; `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image default to `image`'s dimensions; if height and/or width are passed, `image` is resized accordingly.
- **mask_image** (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`) — `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one color channel (L) instead of 3, so the expected shape for a pytorch tensor would be `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)`, or `(H, W)`, and for a numpy array `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`.
- **masked_image_latents** (`torch.Tensor`, `List[torch.Tensor]`) — `Tensor` representing an image batch to mask `image`, generated by the VAE. If not provided, the mask latents tensor will be generated from `mask_image`.
- **height** (`int`, *optional*) — The height in pixels of the generated image. This is set to 1024 by default for the best results.
- **width** (`int`, *optional*) — The width in pixels of the generated image. This is set to 1024 by default for the best results.
- **strength** (`float`, *optional*, defaults to 0.6) — Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point, and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`.
- **num_inference_steps** (`int`, *optional*, defaults to 28) — The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
- **sigmas** (`List[float]`, *optional*) — Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used.
- **guidance_scale** (`float`, *optional*, defaults to 7.0) — Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, usually at the expense of lower image quality.
- **num_images_per_prompt** (`int`, *optional*, defaults to 1) — The number of images to generate per prompt.
- **generator** (`torch.Generator` or `List[torch.Generator]`, *optional*) — One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic.
- **latents** (`torch.FloatTensor`, *optional*) — Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`.
- **prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **pooled_prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from the `prompt` input argument.
- **output_type** (`str`, *optional*, defaults to `"pil"`) — The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/) (`PIL.Image.Image`) or `np.array`.
- **return_dict** (`bool`, *optional*, defaults to `True`) — Whether or not to return a `FluxPipelineOutput` instead of a plain tuple.
- **joint_attention_kwargs** (`dict`, *optional*) — A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- **callback_on_step_end** (`Callable`, *optional*) — A function that is called at the end of each denoising step during inference, with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
- **callback_on_step_end_tensor_inputs** (`List`, *optional*) — The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class.
- **max_sequence_length** (`int`, defaults to 512) — Maximum sequence length to use with the `prompt`.

**Returns:** `FluxPipelineOutput` or `tuple` — `FluxPipelineOutput` if `return_dict` is `True`, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.

**Examples:** see the full example at the top of this page.
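As an illustration of the callback protocol described above, here is a minimal sketch that reuses `pipe` and the inputs from the example at the top of the page. Only `latents` is requested, so it is the only key available in `callback_kwargs`:

```python
def log_latents(pipeline, step, timestep, callback_kwargs):
    # available because "latents" is listed in callback_on_step_end_tensor_inputs
    latents = callback_kwargs["latents"]
    print(f"step {step:3d} | latents mean {latents.mean().item():+.4f}")
    # returned tensors are fed back into the denoising loop
    return {"latents": latents}

output = pipe(
    prompt="a blue robot singing opera with human-like expressions",
    image=image,
    control_image=control_image,
    mask_image=mask_image,
    callback_on_step_end=log_latents,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
```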
### `disable_vae_slicing`

Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step.

### `disable_vae_tiling`

Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step.

### `enable_vae_slicing`

Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.

### `enable_vae_tiling`

Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images.
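These four switches are plain method calls on the pipeline and can be toggled at any point. A short usage sketch, again reusing `pipe` from the example at the top of the page:

```python
# trade a little speed for memory before decoding large batches or big images
pipe.enable_vae_slicing()  # decode the batch one image at a time
pipe.enable_vae_tiling()   # decode each image tile by tile

# ... run inference here ...

# restore one-step decoding once memory pressure is gone
pipe.disable_vae_slicing()
pipe.disable_vae_tiling()
```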
### `encode_prompt`

`encode_prompt(prompt, prompt_2=None, device=None, num_images_per_prompt=1, prompt_embeds=None, pooled_prompt_embeds=None, max_sequence_length=512, lora_scale=None)`

**Parameters:**

- **prompt** (`str` or `List[str]`, *optional*) — Prompt to be encoded.
- **prompt_2** (`str` or `List[str]`, *optional*) — The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in all text encoders.
- **device** (`torch.device`) — Torch device.
- **num_images_per_prompt** (`int`) — Number of images that should be generated per prompt.
- **prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input argument.
- **pooled_prompt_embeds** (`torch.FloatTensor`, *optional*) — Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from the `prompt` input argument.
- **lora_scale** (`float`, *optional*) — A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
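Prompt encoding runs two text encoders, so it can pay to compute the embeddings once and reuse them across calls. A minimal sketch, reusing `pipe`, `image`, `control_image`, and `mask_image` from the example at the top of the page, and assuming the Flux-family convention that `encode_prompt` returns the embeddings plus the text IDs (the IDs are recomputed internally when embeddings are passed back in):

```python
import torch

# encode once with both text encoders
prompt_embeds, pooled_prompt_embeds, _text_ids = pipe.encode_prompt(
    prompt="a blue robot singing opera with human-like expressions",
    prompt_2=None,
    max_sequence_length=512,
)

# reuse the embeddings across several seeds without re-encoding
for seed in (0, 1, 2):
    result = pipe(
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        image=image,
        control_image=control_image,
        mask_image=mask_image,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]
    result.save(f"output_{seed}.png")
```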
## FluxPipelineOutput

`class diffusers.pipelines.flux.pipeline_output.FluxPipelineOutput(images: Union[List[PIL.Image.Image], np.ndarray])`

Output class for Flux image generation pipelines.

**Parameters:**

- **images** (`List[PIL.Image.Image]`, `np.ndarray`, or `torch.Tensor`) — List of denoised PIL images of length `batch_size`, or a numpy array or torch tensor of shape `(batch_size, height, width, num_channels)`. PIL images and numpy arrays represent the denoised images of the diffusion pipeline; torch tensors can represent either the denoised images or the intermediate latents ready to be passed to the decoder.
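For completeness, with `return_dict=False` the same images come back as the first element of a plain tuple rather than a `FluxPipelineOutput`; a short sketch continuing with the objects from the main example:

```python
images = pipe(
    prompt=prompt,
    image=image,
    control_image=control_image,
    mask_image=mask_image,
    return_dict=False,
)[0]
images[0].save("output_tuple.png")
```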