Buckets:
| import{s as st,o as it,n as rt}from"../chunks/scheduler.53228c21.js";import{S as lt,i as pt,e as r,s as o,c as g,h as dt,a as l,d as t,b as a,f as T,g as u,j as f,k as C,l as s,m as i,n as _,t as h,o as b,p as v}from"../chunks/index.100fac89.js";import{C as ct}from"../chunks/CopyLLMTxtMenu.ec4493db.js";import{D as J}from"../chunks/Docstring.86275b98.js";import{C as mt}from"../chunks/CodeBlock.d30a6509.js";import{E as gt}from"../chunks/ExampleCodeBlock.8d9bfd44.js";import{H as Fe,E as ut}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.d4ef6b1e.js";function ft(pe){let m,q="Examples:",$,y,w;return y=new mt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwU2FuYUNvbnRyb2xOZXRQaXBlbGluZSUwQWZyb20lMjBkaWZmdXNlcnMudXRpbHMlMjBpbXBvcnQlMjBsb2FkX2ltYWdlJTBBJTBBcGlwZSUyMCUzRCUyMFNhbmFDb250cm9sTmV0UGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMCUyMmlzaGFuMjQlMkZTYW5hXzYwME1fMTAyNHB4X0NvbnRyb2xOZXRQbHVzX2RpZmZ1c2VycyUyMiUyQyUwQSUyMCUyMCUyMCUyMHZhcmlhbnQlM0QlMjJmcDE2JTIyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0QlN0IlMjJkZWZhdWx0JTIyJTNBJTIwdG9yY2guYmZsb2F0MTYlMkMlMjAlMjJjb250cm9sbmV0JTIyJTNBJTIwdG9yY2guZmxvYXQxNiUyQyUyMCUyMnRyYW5zZm9ybWVyJTIyJTNBJTIwdG9yY2guZmxvYXQxNiU3RCUyQyUwQSUyMCUyMCUyMCUyMGRldmljZV9tYXAlM0QlMjJiYWxhbmNlZCUyMiUyQyUwQSklMEFjb25kX2ltYWdlJTIwJTNEJTIwbG9hZF9pbWFnZSglMEElMjAlMjAlMjAlMjAlMjJodHRwcyUzQSUyRiUyRmh1Z2dpbmdmYWNlLmNvJTJGaXNoYW4yNCUyRlNhbmFfNjAwTV8xMDI0cHhfQ29udHJvbE5ldF9kaWZmdXNlcnMlMkZyZXNvbHZlJTJGbWFpbiUyRmhlZF9leGFtcGxlLnBuZyUyMiUwQSklMEFwcm9tcHQlMjAlM0QlMjAnYSUyMGNhdCUyMHdpdGglMjBhJTIwbmVvbiUyMHNpZ24lMjB0aGF0JTIwc2F5cyUyMCUyMlNhbmElMjInJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMGNvbnRyb2xfaW1hZ2UlM0Rjb25kX2ltYWdlJTJDJTBBKS5pbWFnZXMlNUIwJTVEJTBBaW1hZ2Uuc2F2ZSglMjJvdXRwdXQucG5nJTIyKQ==",highlighted:`<span class="hljs-meta">>>> </span><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> SanaControlNetPipeline | |
| <span class="hljs-meta">>>> </span><span class="hljs-keyword">from</span> diffusers.utils <span class="hljs-keyword">import</span> load_image | |
| <span class="hljs-meta">>>> </span>pipe = SanaControlNetPipeline.from_pretrained( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"ishan24/Sana_600M_1024px_ControlNetPlus_diffusers"</span>, | |
| <span class="hljs-meta">... </span> variant=<span class="hljs-string">"fp16"</span>, | |
| <span class="hljs-meta">... </span> torch_dtype={<span class="hljs-string">"default"</span>: torch.bfloat16, <span class="hljs-string">"controlnet"</span>: torch.float16, <span class="hljs-string">"transformer"</span>: torch.float16}, | |
| <span class="hljs-meta">... </span> device_map=<span class="hljs-string">"balanced"</span>, | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>cond_image = load_image( | |
| <span class="hljs-meta">... </span> <span class="hljs-string">"https://huggingface.co/ishan24/Sana_600M_1024px_ControlNet_diffusers/resolve/main/hed_example.png"</span> | |
| <span class="hljs-meta">... </span>) | |
| <span class="hljs-meta">>>> </span>prompt = <span class="hljs-string">'a cat with a neon sign that says "Sana"'</span> | |
| <span class="hljs-meta">>>> </span>image = pipe( | |
| <span class="hljs-meta">... </span> prompt, | |
| <span class="hljs-meta">... </span> control_image=cond_image, | |
| <span class="hljs-meta">... </span>).images[<span class="hljs-number">0</span>] | |
| <span class="hljs-meta">>>> </span>image.save(<span class="hljs-string">"output.png"</span>)`,wrap:!1}}),{c(){m=r("p"),m.textContent=q,$=o(),g(y.$$.fragment)},l(d){m=l(d,"P",{"data-svelte-h":!0}),f(m)!=="svelte-kvfsh7"&&(m.textContent=q),$=a(d),u(y.$$.fragment,d)},m(d,x){i(d,m,x),i(d,$,x),_(y,d,x),w=!0},p:rt,i(d){w||(h(y.$$.fragment,d),w=!0)},o(d){b(y.$$.fragment,d),w=!1},d(d){d&&(t(m),t($)),v(y,d)}}}function _t(pe){let m,q,$,y,w,d,x,de,M,Ve='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>',ce,E,Be='ControlNet was introduced in <a href="https://huggingface.co/papers/2302.05543" rel="nofollow">Adding Conditional Control to Text-to-Image Diffusion Models</a> by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.',me,Z,We="With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that’ll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.",ge,D,Qe="The abstract from the paper is:",ue,A,ze="<em>We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with “zero convolutions” (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. 
Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.</em>",fe,G,Re=`This pipeline was contributed by <a href="https://huggingface.co/ishan24" rel="nofollow">ishan24</a>. ❤️ | |
| The original codebase can be found at <a href="https://github.com/NVlabs/Sana" rel="nofollow">NVlabs/Sana</a>, and you can find official ControlNet checkpoints on <a href="https://huggingface.co/Efficient-Large-Model" rel="nofollow">Efficient-Large-Model’s</a> Hub profile.`,_e,O,he,p,H,Se,K,Xe='Pipeline for text-to-image generation using <a href="https://huggingface.co/papers/2410.10629" rel="nofollow">Sana</a>.',Ie,N,F,ke,ee,Ye="Function invoked when calling the pipeline for generation.",Le,S,Ue,I,V,je,te,Ke=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Je,k,B,qe,ne,et=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to | |
| computing decoding in one step.`,Ee,L,W,Ze,oe,tt=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to | |
| compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,De,U,Q,Ae,ae,nt=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to | |
| compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow | |
| processing larger images.`,Ge,j,z,Oe,se,ot="Encodes the prompt into text encoder hidden states.",be,R,ve,P,X,He,ie,at="Output class for Sana pipelines.",ye,Y,we,le,xe;return w=new ct({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),x=new Fe({props:{title:"ControlNet",local:"controlnet",headingTag:"h1"}}),O=new Fe({props:{title:"SanaControlNetPipeline",local:"diffusers.SanaControlNetPipeline",headingTag:"h2"}}),H=new J({props:{name:"class diffusers.SanaControlNetPipeline",anchor:"diffusers.SanaControlNetPipeline",parameters:[{name:"tokenizer",val:": typing.Union[transformers.models.gemma.tokenization_gemma.GemmaTokenizer, transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast]"},{name:"text_encoder",val:": Gemma2PreTrainedModel"},{name:"vae",val:": AutoencoderDC"},{name:"transformer",val:": SanaTransformer2DModel"},{name:"controlnet",val:": SanaControlNetModel"},{name:"scheduler",val:": DPMSolverMultistepScheduler"}],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L197"}}),F=new J({props:{name:"__call__",anchor:"diffusers.SanaControlNetPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": str = ''"},{name:"num_inference_steps",val:": int = 20"},{name:"timesteps",val:": typing.List[int] = None"},{name:"sigmas",val:": typing.List[float] = None"},{name:"guidance_scale",val:": float = 4.5"},{name:"control_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"controlnet_conditioning_scale",val:": typing.Union[float, typing.List[float]] = 1.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"height",val:": int = 1024"},{name:"width",val:": int = 1024"},{name:"eta",val:": float = 
0.0"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"clean_caption",val:": bool = False"},{name:"use_resolution_binning",val:": bool = True"},{name:"attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:`: typing.List[str] = ["Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. 
Evaluate the level of detail in the user prompt:", '- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.', '- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.', 'Here are examples of how to transform or refine prompts:', '- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.', '- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.', 'Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:', 'User Prompt: ']`}],parametersDescription:[{anchor:"diffusers.SanaControlNetPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>. | |
| instead.`,name:"prompt"},{anchor:"diffusers.SanaControlNetPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| <code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is | |
| less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.SanaControlNetPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 20) — | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.SanaControlNetPipeline.__call__.timesteps",description:`<strong>timesteps</strong> (<code>List[int]</code>, <em>optional</em>) — | |
| Custom timesteps to use for the denoising process with schedulers which support a <code>timesteps</code> argument | |
| in their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is | |
| passed will be used. Must be in descending order.`,name:"timesteps"},{anchor:"diffusers.SanaControlNetPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) — | |
| Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in | |
| their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed | |
| will be used.`,name:"sigmas"},{anchor:"diffusers.SanaControlNetPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 4.5) — | |
| Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion | |
| Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2. | |
| of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting | |
| <code>guidance_scale > 1</code>. Higher guidance scale encourages to generate images that are closely linked to | |
| the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.SanaControlNetPipeline.__call__.control_image",description:`<strong>control_image</strong> (<code>torch.Tensor</code>, <code>PIL.Image.Image</code>, <code>np.ndarray</code>, <code>List[torch.Tensor]</code>, <code>List[PIL.Image.Image]</code>, <code>List[np.ndarray]</code>, — | |
| <code>List[List[torch.Tensor]]</code>, <code>List[List[np.ndarray]]</code> or <code>List[List[PIL.Image.Image]]</code>): | |
| The ControlNet input condition to provide guidance to the <code>unet</code> for generation. If the type is | |
| specified as <code>torch.Tensor</code>, it is passed to ControlNet as is. <code>PIL.Image.Image</code> can also be accepted | |
| as an image. The dimensions of the output image defaults to <code>image</code>’s dimensions. If height and/or | |
| width are passed, <code>image</code> is resized accordingly. If multiple ControlNets are specified in <code>init</code>, | |
| images must be passed as a list such that each element of the list can be correctly batched for input | |
| to a single ControlNet.`,name:"control_image"},{anchor:"diffusers.SanaControlNetPipeline.__call__.controlnet_conditioning_scale",description:`<strong>controlnet_conditioning_scale</strong> (<code>float</code> or <code>List[float]</code>, <em>optional</em>, defaults to 1.0) — | |
| The outputs of the ControlNet are multiplied by <code>controlnet_conditioning_scale</code> before they are added | |
| to the residual in the original <code>unet</code>. If multiple ControlNets are specified in <code>init</code>, you can set | |
| the corresponding scale as a list.`,name:"controlnet_conditioning_scale"},{anchor:"diffusers.SanaControlNetPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaControlNetPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The height in pixels of the generated image.`,name:"height"},{anchor:"diffusers.SanaControlNetPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size) — | |
| The width in pixels of the generated image.`,name:"width"},{anchor:"diffusers.SanaControlNetPipeline.__call__.eta",description:`<strong>eta</strong> (<code>float</code>, <em>optional</em>, defaults to 0.0) — | |
| Corresponds to parameter eta (η) in the DDIM paper: <a href="https://huggingface.co/papers/2010.02502" rel="nofollow">https://huggingface.co/papers/2010.02502</a>. Only | |
| applies to <a href="/docs/diffusers/pr_12631/en/api/schedulers/ddim#diffusers.DDIMScheduler">schedulers.DDIMScheduler</a>, will be ignored for others.`,name:"eta"},{anchor:"diffusers.SanaControlNetPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) — | |
| One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a> | |
| to make generation deterministic.`,name:"generator"},{anchor:"diffusers.SanaControlNetPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.SanaControlNetPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaControlNetPipeline.__call__.prompt_attention_mask",description:"<strong>prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — Pre-generated attention mask for text embeddings.",name:"prompt_attention_mask"},{anchor:"diffusers.SanaControlNetPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not | |
| provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.SanaControlNetPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated attention mask for negative text embeddings.`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.SanaControlNetPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>"pil"</code>) — | |
| The output format of the generate image. Choose between | |
| <a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.SanaControlNetPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to return a <code>~pipelines.stable_diffusion.IFPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.SanaControlNetPipeline.__call__.attention_kwargs",description:`<strong>attention_kwargs</strong> — | |
| A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under | |
| <code>self.processor</code> in | |
| <a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"attention_kwargs"},{anchor:"diffusers.SanaControlNetPipeline.__call__.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| Whether or not to clean the caption before creating embeddings. Requires <code>beautifulsoup4</code> and <code>ftfy</code> to | |
| be installed. If the dependencies are not installed, the embeddings will be created from the raw | |
| prompt.`,name:"clean_caption"},{anchor:"diffusers.SanaControlNetPipeline.__call__.use_resolution_binning",description:`<strong>use_resolution_binning</strong> (<code>bool</code> defaults to <code>True</code>) — | |
| If set to <code>True</code>, the requested height and width are first mapped to the closest resolutions using | |
| <code>ASPECT_RATIO_1024_BIN</code>. After the produced latents are decoded into images, they are resized back to | |
| the requested resolution. Useful for generating non-square images.`,name:"use_resolution_binning"},{anchor:"diffusers.SanaControlNetPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) — | |
| A function that calls at the end of each denoising steps during the inference. The function is called | |
| with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by | |
| <code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.SanaControlNetPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) — | |
| The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list | |
| will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the | |
| <code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.SanaControlNetPipeline.__call__.max_sequence_length",description:`<strong>max_sequence_length</strong> (<code>int</code> defaults to <code>300</code>) — | |
| Maximum sequence length to use with the <code>prompt</code>.`,name:"max_sequence_length"},{anchor:"diffusers.SanaControlNetPipeline.__call__.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>List[str]</code>, <em>optional</em>) — | |
| Instructions for complex human attention: | |
| <a href="https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55" rel="nofollow">https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55</a>.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L776",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p>If <code>return_dict</code> is <code>True</code>, <a | |
| href="/docs/diffusers/pr_12631/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> is returned, | |
| otherwise a <code>tuple</code> is returned where the first element is a list with the generated images</p> | |
| `,returnType:`<script context="module">export const metadata = 'undefined';<\/script> | |
| <p><a | |
| href="/docs/diffusers/pr_12631/en/api/pipelines/controlnet_sana#diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput" | |
| >SanaPipelineOutput</a> or <code>tuple</code></p> | |
| `}}),S=new gt({props:{anchor:"diffusers.SanaControlNetPipeline.__call__.example",$$slots:{default:[ft]},$$scope:{ctx:pe}}}),V=new J({props:{name:"disable_vae_slicing",anchor:"diffusers.SanaControlNetPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L249"}}),B=new J({props:{name:"disable_vae_tiling",anchor:"diffusers.SanaControlNetPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L276"}}),W=new J({props:{name:"enable_vae_slicing",anchor:"diffusers.SanaControlNetPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L236"}}),Q=new J({props:{name:"enable_vae_tiling",anchor:"diffusers.SanaControlNetPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L262"}}),z=new J({props:{name:"encode_prompt",anchor:"diffusers.SanaControlNetPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"negative_prompt",val:": str = ''"},{name:"num_images_per_prompt",val:": int = 1"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"clean_caption",val:": bool = False"},{name:"max_sequence_length",val:": int = 300"},{name:"complex_human_instruction",val:": typing.Optional[typing.List[str]] = None"},{name:"lora_scale",val:": 
typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| prompt to be encoded`,name:"prompt"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) — | |
| The prompt not to guide the image generation. If not defined, one has to pass <code>negative_prompt_embeds</code> | |
| instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>). For | |
| PixArt-Alpha, this should be "".`,name:"negative_prompt"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.do_classifier_free_guidance",description:`<strong>do_classifier_free_guidance</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) — | |
| whether to use classifier free guidance or not`,name:"do_classifier_free_guidance"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) — | |
| number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.device",description:`<strong>device</strong> — (<code>torch.device</code>, <em>optional</em>): | |
| torch device to place the resulting embeddings on`,name:"device"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not | |
| provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) — | |
| Pre-generated negative text embeddings. For Sana, it’s should be the embeddings of the "" string.`,name:"negative_prompt_embeds"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.clean_caption",description:`<strong>clean_caption</strong> (<code>bool</code>, defaults to <code>False</code>) — | |
| If <code>True</code>, the function will preprocess and clean the provided caption before encoding.`,name:"clean_caption"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code>, defaults to 300) — Maximum sequence length to use for the prompt.",name:"max_sequence_length"},{anchor:"diffusers.SanaControlNetPipeline.encode_prompt.complex_human_instruction",description:`<strong>complex_human_instruction</strong> (<code>list[str]</code>, defaults to <code>complex_human_instruction</code>) — | |
| If <code>complex_human_instruction</code> is not empty, the function will use the complex Human instruction for | |
| the prompt.`,name:"complex_human_instruction"}],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_sana_controlnet.py#L349"}}),R=new Fe({props:{title:"SanaPipelineOutput",local:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",headingTag:"h2"}}),X=new J({props:{name:"class diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",anchor:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput",parameters:[{name:"images",val:": typing.Union[typing.List[PIL.Image.Image], numpy.ndarray]"}],parametersDescription:[{anchor:"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput.images",description:`<strong>images</strong> (<code>List[PIL.Image.Image]</code> or <code>np.ndarray</code>) — | |
| List of denoised PIL images of length <code>batch_size</code> or numpy array of shape <code>(batch_size, height, width, num_channels)</code>. PIL images or numpy array present the denoised images of the diffusion pipeline.`,name:"images"}],source:"https://github.com/huggingface/diffusers/blob/vr_12631/src/diffusers/pipelines/sana/pipeline_output.py#L12"}}),Y=new ut({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/controlnet_sana.md"}}),{c(){m=r("meta"),q=o(),$=r("p"),y=o(),g(w.$$.fragment),d=o(),g(x.$$.fragment),de=o(),M=r("div"),M.innerHTML=Ve,ce=o(),E=r("p"),E.innerHTML=Be,me=o(),Z=r("p"),Z.textContent=We,ge=o(),D=r("p"),D.textContent=Qe,ue=o(),A=r("p"),A.innerHTML=ze,fe=o(),G=r("p"),G.innerHTML=Re,_e=o(),g(O.$$.fragment),he=o(),p=r("div"),g(H.$$.fragment),Se=o(),K=r("p"),K.innerHTML=Xe,Ie=o(),N=r("div"),g(F.$$.fragment),ke=o(),ee=r("p"),ee.textContent=Ye,Le=o(),g(S.$$.fragment),Ue=o(),I=r("div"),g(V.$$.fragment),je=o(),te=r("p"),te.innerHTML=Ke,Je=o(),k=r("div"),g(B.$$.fragment),qe=o(),ne=r("p"),ne.innerHTML=et,Ee=o(),L=r("div"),g(W.$$.fragment),Ze=o(),oe=r("p"),oe.textContent=tt,De=o(),U=r("div"),g(Q.$$.fragment),Ae=o(),ae=r("p"),ae.textContent=nt,Ge=o(),j=r("div"),g(z.$$.fragment),Oe=o(),se=r("p"),se.textContent=ot,be=o(),g(R.$$.fragment),ve=o(),P=r("div"),g(X.$$.fragment),He=o(),ie=r("p"),ie.textContent=at,ye=o(),g(Y.$$.fragment),we=o(),le=r("p"),this.h()},l(e){const 
n=dt("svelte-u9bgzb",document.head);m=l(n,"META",{name:!0,content:!0}),n.forEach(t),q=a(e),$=l(e,"P",{}),T($).forEach(t),y=a(e),u(w.$$.fragment,e),d=a(e),u(x.$$.fragment,e),de=a(e),M=l(e,"DIV",{class:!0,"data-svelte-h":!0}),f(M)!=="svelte-si9ct8"&&(M.innerHTML=Ve),ce=a(e),E=l(e,"P",{"data-svelte-h":!0}),f(E)!=="svelte-1v2xz23"&&(E.innerHTML=Be),me=a(e),Z=l(e,"P",{"data-svelte-h":!0}),f(Z)!=="svelte-1dn0wji"&&(Z.textContent=We),ge=a(e),D=l(e,"P",{"data-svelte-h":!0}),f(D)!=="svelte-1cwsb16"&&(D.textContent=Qe),ue=a(e),A=l(e,"P",{"data-svelte-h":!0}),f(A)!=="svelte-fbiw6t"&&(A.innerHTML=ze),fe=a(e),G=l(e,"P",{"data-svelte-h":!0}),f(G)!=="svelte-7t3a1f"&&(G.innerHTML=Re),_e=a(e),u(O.$$.fragment,e),he=a(e),p=l(e,"DIV",{class:!0});var c=T(p);u(H.$$.fragment,c),Se=a(c),K=l(c,"P",{"data-svelte-h":!0}),f(K)!=="svelte-1ot17tf"&&(K.innerHTML=Xe),Ie=a(c),N=l(c,"DIV",{class:!0});var re=T(N);u(F.$$.fragment,re),ke=a(re),ee=l(re,"P",{"data-svelte-h":!0}),f(ee)!=="svelte-v78lg8"&&(ee.textContent=Ye),Le=a(re),u(S.$$.fragment,re),re.forEach(t),Ue=a(c),I=l(c,"DIV",{class:!0});var Ce=T(I);u(V.$$.fragment,Ce),je=a(Ce),te=l(Ce,"P",{"data-svelte-h":!0}),f(te)!=="svelte-1s3c06i"&&(te.innerHTML=Ke),Ce.forEach(t),Je=a(c),k=l(c,"DIV",{class:!0});var $e=T(k);u(B.$$.fragment,$e),qe=a($e),ne=l($e,"P",{"data-svelte-h":!0}),f(ne)!=="svelte-pkn4ui"&&(ne.innerHTML=et),$e.forEach(t),Ee=a(c),L=l(c,"DIV",{class:!0});var Ne=T(L);u(W.$$.fragment,Ne),Ze=a(Ne),oe=l(Ne,"P",{"data-svelte-h":!0}),f(oe)!=="svelte-14bnrb6"&&(oe.textContent=tt),Ne.forEach(t),De=a(c),U=l(c,"DIV",{class:!0});var Te=T(U);u(Q.$$.fragment,Te),Ae=a(Te),ae=l(Te,"P",{"data-svelte-h":!0}),f(ae)!=="svelte-1xwrf7t"&&(ae.textContent=nt),Te.forEach(t),Ge=a(c),j=l(c,"DIV",{class:!0});var Pe=T(j);u(z.$$.fragment,Pe),Oe=a(Pe),se=l(Pe,"P",{"data-svelte-h":!0}),f(se)!=="svelte-16q0ax1"&&(se.textContent=ot),Pe.forEach(t),c.forEach(t),be=a(e),u(R.$$.fragment,e),ve=a(e),P=l(e,"DIV",{class:!0});var 
Me=T(P);u(X.$$.fragment,Me),He=a(Me),ie=l(Me,"P",{"data-svelte-h":!0}),f(ie)!=="svelte-1h3n85u"&&(ie.textContent=at),Me.forEach(t),ye=a(e),u(Y.$$.fragment,e),we=a(e),le=l(e,"P",{}),T(le).forEach(t),this.h()},h(){C(m,"name","hf:doc:metadata"),C(m,"content",ht),C(M,"class","flex flex-wrap space-x-1"),C(N,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(I,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(k,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(U,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(p,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),C(P,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,n){s(document.head,m),i(e,q,n),i(e,$,n),i(e,y,n),_(w,e,n),i(e,d,n),_(x,e,n),i(e,de,n),i(e,M,n),i(e,ce,n),i(e,E,n),i(e,me,n),i(e,Z,n),i(e,ge,n),i(e,D,n),i(e,ue,n),i(e,A,n),i(e,fe,n),i(e,G,n),i(e,_e,n),_(O,e,n),i(e,he,n),i(e,p,n),_(H,p,null),s(p,Se),s(p,K),s(p,Ie),s(p,N),_(F,N,null),s(N,ke),s(N,ee),s(N,Le),_(S,N,null),s(p,Ue),s(p,I),_(V,I,null),s(I,je),s(I,te),s(p,Je),s(p,k),_(B,k,null),s(k,qe),s(k,ne),s(p,Ee),s(p,L),_(W,L,null),s(L,Ze),s(L,oe),s(p,De),s(p,U),_(Q,U,null),s(U,Ae),s(U,ae),s(p,Ge),s(p,j),_(z,j,null),s(j,Oe),s(j,se),i(e,be,n),_(R,e,n),i(e,ve,n),i(e,P,n),_(X,P,null),s(P,He),s(P,ie),i(e,ye,n),_(Y,e,n),i(e,we,n),i(e,le,n),xe=!0},p(e,[n]){const 
c={};n&2&&(c.$$scope={dirty:n,ctx:e}),S.$set(c)},i(e){xe||(h(w.$$.fragment,e),h(x.$$.fragment,e),h(O.$$.fragment,e),h(H.$$.fragment,e),h(F.$$.fragment,e),h(S.$$.fragment,e),h(V.$$.fragment,e),h(B.$$.fragment,e),h(W.$$.fragment,e),h(Q.$$.fragment,e),h(z.$$.fragment,e),h(R.$$.fragment,e),h(X.$$.fragment,e),h(Y.$$.fragment,e),xe=!0)},o(e){b(w.$$.fragment,e),b(x.$$.fragment,e),b(O.$$.fragment,e),b(H.$$.fragment,e),b(F.$$.fragment,e),b(S.$$.fragment,e),b(V.$$.fragment,e),b(B.$$.fragment,e),b(W.$$.fragment,e),b(Q.$$.fragment,e),b(z.$$.fragment,e),b(R.$$.fragment,e),b(X.$$.fragment,e),b(Y.$$.fragment,e),xe=!1},d(e){e&&(t(q),t($),t(y),t(d),t(de),t(M),t(ce),t(E),t(me),t(Z),t(ge),t(D),t(ue),t(A),t(fe),t(G),t(_e),t(he),t(p),t(be),t(ve),t(P),t(ye),t(we),t(le)),t(m),v(w,e),v(x,e),v(O,e),v(H),v(F),v(S),v(V),v(B),v(W),v(Q),v(z),v(R,e),v(X),v(Y,e)}}}const ht='{"title":"ControlNet","local":"controlnet","sections":[{"title":"SanaControlNetPipeline","local":"diffusers.SanaControlNetPipeline","sections":[],"depth":2},{"title":"SanaPipelineOutput","local":"diffusers.pipelines.sana.pipeline_output.SanaPipelineOutput","sections":[],"depth":2}],"depth":1}';function bt(pe){return it(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Tt extends lt{constructor(m){super(),pt(this,m,bt,_t,st,{})}}export{Tt as component}; | |
Xet Storage Details
- Size:
- 36.3 kB
- Xet hash:
- baf8b39a501fc65f8390de68432e5bf75f16c87d6d17efb7bc73d87a0b3dc1b7
·
Xet stores files efficiently by splitting them into unique, deduplicated chunks, which accelerates both uploads and downloads. More info.