Buckets:

rtrm's picture
download
raw
68.7 kB
import{s as gn,o as hn,n as cn}from"../chunks/scheduler.53228c21.js";import{S as fn,i as un,e as i,s as o,c as p,h as _n,a as r,d as n,b as a,f as C,g as d,j as _,k as T,l as t,m as l,n as c,t as g,o as h,p as f}from"../chunks/index.100fac89.js";import{C as bn}from"../chunks/CopyLLMTxtMenu.2e0d31a4.js";import{D as U}from"../chunks/Docstring.95cca3f7.js";import{C as dt}from"../chunks/CodeBlock.d30a6509.js";import{E as dn}from"../chunks/ExampleCodeBlock.1209a48b.js";import{H as Le,E as yn}from"../chunks/MermaidChart.svelte_svelte_type_style_lang.b400f367.js";function Mn(be){let u,k="Examples:",J,v,I;return v=new dt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQ2hyb21hUGlwZWxpbmUlMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMmxvZGVzdG9uZXMlMkZDaHJvbWExLUhEJTIyJTBBY2twdF9wYXRoJTIwJTNEJTIwJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmxvZGVzdG9uZXMlMkZDaHJvbWExLUhEJTJGYmxvYiUyRm1haW4lMkZDaHJvbWExLUhELnNhZmV0ZW5zb3JzJTIyJTBBdHJhbnNmb3JtZXIlMjAlM0QlMjBDaHJvbWFUcmFuc2Zvcm1lcjJETW9kZWwuZnJvbV9zaW5nbGVfZmlsZShja3B0X3BhdGglMkMlMjB0b3JjaF9kdHlwZSUzRHRvcmNoLmJmbG9hdDE2KSUwQXBpcGUlMjAlM0QlMjBDaHJvbWFQaXBlbGluZS5mcm9tX3ByZXRyYWluZWQoJTBBJTIwJTIwJTIwJTIwbW9kZWxfaWQlMkMlMEElMjAlMjAlMjAlMjB0cmFuc2Zvcm1lciUzRHRyYW5zZm9ybWVyJTJDJTBBJTIwJTIwJTIwJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiUyQyUwQSklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBcHJvbXB0JTIwJTNEJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIyQSUyMGhpZ2gtZmFzaGlvbiUyMGNsb3NlLXVwJTIwcG9ydHJhaXQlMjBvZiUyMGElMjBibG9uZGUlMjB3b21hbiUyMGluJTIwY2xlYXIlMjBzdW5nbGFzc2VzLiUyMFRoZSUyMGltYWdlJTIwdXNlcyUyMGElMjBib2xkJTIwdGVhbCUyMGFuZCUyMHJlZCUyMGNvbG9yJTIwc3BsaXQlMjBmb3IlMjBkcmFtYXRpYyUyMGxpZ2h0aW5nLiUyMFRoZSUyMGJhY2tncm91bmQlMjBpcyUyMGElMjBzaW1wbGUlMjB0ZWFsLWdyZWVuLiUyMFRoZSUyMHBob3RvJTIwaXMlMjBzaGFycCUyMGFuZCUyMHdlbGwtY29tcG9zZWQlMkMlMjBhbmQlMjBpcyUyMGRlc2lnbmVkJTIwZm9yJTIwdmlld2luZyUyMHdpdGglMjBhbmFnbHlwaCUyMDNEJTIwZ2xhc3NlcyUyMGZvciUyMG9wdGltYWwlMjBlZmZlY3QuJTIwSXQlMjBsb29rcyUyMHByb2Zlc3Npb25hbGx5JTIwZG9uZS4lMjIlMEElNUQlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlNUIlMEElMjAlMjAlMjAlMjAlMjJsb3clMjBxdWFsaXR5JTJDJTIwdWdseSUyQyUyMHVuZmluaXNoZWQlMkMlMjBvdXQlMjBvZiUyMGZvY3VzJTJDJTIwZGVmb3JtZWQlMkMlMjBkaXNmaWd1cmUlMkMlMjBibHVycnklMkMlMjBzbXVkZ2VkJTJDJTIwcmVzdHJpY3RlZCUyMHBhbGV0dGUlMkMlMjBmbGF0JTIwY29sb3JzJTIyJTBBJTVEJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKHByb21wdCUyQyUyMG5lZ2F0aXZlX3Byb21wdCUzRG5lZ2F0aXZlX3Byb21wdCkuaW1hZ2VzJTVCMCU1RCUwQWltYWdlLnNhdmUoJTIyY2hyb21hLnBuZyUyMik=",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> ChromaPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span>model_id = <span class="hljs-string">&quot;lodestones/Chroma1-HD&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>ckpt_path = <span class="hljs-string">&quot;https://huggingface.co/lodestones/Chroma1-HD/blob/main/Chroma1-HD.safetensors&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>transformer = ChromaTransformer2DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = ChromaPipeline.from_pretrained(
<span class="hljs-meta">... </span> model_id,
<span class="hljs-meta">... </span> transformer=transformer,
<span class="hljs-meta">... </span> torch_dtype=torch.bfloat16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = [
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;A high-fashion close-up portrait of a blonde woman in clear sunglasses. The image uses a bold teal and red color split for dramatic lighting. The background is a simple teal-green. The photo is sharp and well-composed, and is designed for viewing with anaglyph 3D glasses for optimal effect. It looks professionally done.&quot;</span>
<span class="hljs-meta">... </span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>negative_prompt = [
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors&quot;</span>
<span class="hljs-meta">... </span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt, negative_prompt=negative_prompt).images[<span class="hljs-number">0</span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>image.save(<span class="hljs-string">&quot;chroma.png&quot;</span>)`,wrap:!1}}),{c(){u=i("p"),u.textContent=k,J=o(),p(v.$$.fragment)},l(m){u=r(m,"P",{"data-svelte-h":!0}),_(u)!=="svelte-kvfsh7"&&(u.textContent=k),J=a(m),d(v.$$.fragment,m)},m(m,x){l(m,u,x),l(m,J,x),c(v,m,x),I=!0},p:cn,i(m){I||(g(v.$$.fragment,m),I=!0)},o(m){h(v.$$.fragment,m),I=!1},d(m){m&&(n(u),n(J)),f(v,m)}}}function vn(be){let u,k="Examples:",J,v,I;return v=new dt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQ2hyb21hVHJhbnNmb3JtZXIyRE1vZGVsJTJDJTIwQ2hyb21hSW1nMkltZ1BpcGVsaW5lJTBBJTBBbW9kZWxfaWQlMjAlM0QlMjAlMjJsb2Rlc3RvbmVzJTJGQ2hyb21hMS1IRCUyMiUwQWNrcHRfcGF0aCUyMCUzRCUyMCUyMmh0dHBzJTNBJTJGJTJGaHVnZ2luZ2ZhY2UuY28lMkZsb2Rlc3RvbmVzJTJGQ2hyb21hMS1IRCUyRmJsb2IlMkZtYWluJTJGQ2hyb21hMS1IRC5zYWZldGVuc29ycyUyMiUwQXBpcGUlMjAlM0QlMjBDaHJvbWFJbWcySW1nUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUwQSUyMCUyMCUyMCUyMG1vZGVsX2lkJTJDJTBBJTIwJTIwJTIwJTIwdHJhbnNmb3JtZXIlM0R0cmFuc2Zvcm1lciUyQyUwQSUyMCUyMCUyMCUyMHRvcmNoX2R0eXBlJTNEdG9yY2guYmZsb2F0MTYlMkMlMEEpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQWluaXRfaW1hZ2UlMjAlM0QlMjBsb2FkX2ltYWdlKCUwQSUyMCUyMCUyMCUyMCUyMmh0dHBzJTNBJTJGJTJGcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSUyRkNvbXBWaXMlMkZzdGFibGUtZGlmZnVzaW9uJTJGbWFpbiUyRmFzc2V0cyUyRnN0YWJsZS1zYW1wbGVzJTJGaW1nMmltZyUyRnNrZXRjaC1tb3VudGFpbnMtaW5wdXQuanBnJTIyJTBBKSUwQXByb21wdCUyMCUzRCUyMCUyMmElMjBzY2VuaWMlMjBmYXN0YXN5JTIwbGFuZHNjYXBlJTIwd2l0aCUyMGElMjByaXZlciUyMGFuZCUyMG1vdW50YWlucyUyMGluJTIwdGhlJTIwYmFja2dyb3VuZCUyQyUyMHZpYnJhbnQlMjBjb2xvcnMlMkMlMjBkZXRhaWxlZCUyQyUyMGhpZ2glMjByZXNvbHV0aW9uJTIyJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIybG93JTIwcXVhbGl0eSUyQyUyMHVnbHklMkMlMjB1bmZpbmlzaGVkJTJDJTIwb3V0JTIwb2YlMjBmb2N1cyUyQyUyMGRlZm9ybWVkJTJDJTIwZGlzZmlndXJlJTJDJTIwYmx1cnJ5JTJDJTIwc211ZGdlZCUyQyUyMHJlc3RyaWN0ZWQlMjBwYWxldHRlJTJDJTIwZmxhdCUyMGNvbG9ycyUyMiUwQWltYWdlJTIwJTNEJTIwcGlwZShwcm9tcHQlMkMlMjBpbWFnZSUzRGluaXRfaW1hZ2UlMkMlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmNocm9tYS1pbWcyaW1nLnBuZyUyMik=",highlighted:`<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">import</span> torch
<span class="hljs-meta">&gt;&gt;&gt; </span><span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> ChromaTransformer2DModel, ChromaImg2ImgPipeline
<span class="hljs-meta">&gt;&gt;&gt; </span>model_id = <span class="hljs-string">&quot;lodestones/Chroma1-HD&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>ckpt_path = <span class="hljs-string">&quot;https://huggingface.co/lodestones/Chroma1-HD/blob/main/Chroma1-HD.safetensors&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe = ChromaImg2ImgPipeline.from_pretrained(
<span class="hljs-meta">... </span> model_id,
<span class="hljs-meta">... </span> transformer=transformer,
<span class="hljs-meta">... </span> torch_dtype=torch.bfloat16,
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>pipe.enable_model_cpu_offload()
<span class="hljs-meta">&gt;&gt;&gt; </span>init_image = load_image(
<span class="hljs-meta">... </span> <span class="hljs-string">&quot;https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg&quot;</span>
<span class="hljs-meta">... </span>)
<span class="hljs-meta">&gt;&gt;&gt; </span>prompt = <span class="hljs-string">&quot;a scenic fastasy landscape with a river and mountains in the background, vibrant colors, detailed, high resolution&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>negative_prompt = <span class="hljs-string">&quot;low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors&quot;</span>
<span class="hljs-meta">&gt;&gt;&gt; </span>image = pipe(prompt, image=init_image, negative_prompt=negative_prompt).images[<span class="hljs-number">0</span>]
<span class="hljs-meta">&gt;&gt;&gt; </span>image.save(<span class="hljs-string">&quot;chroma-img2img.png&quot;</span>)`,wrap:!1}}),{c(){u=i("p"),u.textContent=k,J=o(),p(v.$$.fragment)},l(m){u=r(m,"P",{"data-svelte-h":!0}),_(u)!=="svelte-kvfsh7"&&(u.textContent=k),J=a(m),d(v.$$.fragment,m)},m(m,x){l(m,u,x),l(m,J,x),c(v,m,x),I=!0},p:cn,i(m){I||(g(v.$$.fragment,m),I=!0)},o(m){h(v.$$.fragment,m),I=!1},d(m){m&&(n(u),n(J)),f(v,m)}}}function wn(be){let u,k,J,v,I,m,x,Ee,G,Nt='<img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/> <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&amp;logo=apple&amp;logoColor=white%22"/>',Ve,Q,Ht="Chroma is a text to image generation model based on Flux.",Ne,R,zt="Original model checkpoints for Chroma can be found here:",He,Y,Dt='<li>High-resolution finetune: <a href="https://huggingface.co/lodestones/Chroma1-HD" rel="nofollow">lodestones/Chroma1-HD</a></li> <li>Base model: <a href="https://huggingface.co/lodestones/Chroma1-Base" rel="nofollow">lodestones/Chroma1-Base</a></li> <li>Original repo with progress checkpoints: <a href="https://huggingface.co/lodestones/Chroma" rel="nofollow">lodestones/Chroma</a> (loading this repo with <code>from_pretrained</code> will load a Diffusers-compatible version of the <code>unlocked-v37</code> checkpoint)</li>',ze,B,Xt="<p>Chroma can use all the same optimizations as Flux.</p>",De,F,Xe,A,Qe,q,Re,S,Qt="To use updated model checkpoints that are not in the Diffusers format, you can use the <code>ChromaTransformer2DModel</code> class to load the model from a single file in the original format. This is also useful when trying to load finetunes or quantized versions of the models that have been published by the community.",Ye,O,Rt="The following example demonstrates how to run Chroma from a single file.",Fe,K,Yt="Then run the following example",Ae,ee,qe,te,Se,b,ne,ct,ye,Ft="The Chroma pipeline for text-to-image generation.",gt,Me,At='Reference: <a href="https://huggingface.co/lodestones/Chroma1-HD/" rel="nofollow">https://huggingface.co/lodestones/Chroma1-HD/</a>',ht,Z,oe,ft,ve,qt="Function invoked when calling the pipeline for generation.",ut,P,_t,W,ae,bt,we,St=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to
computing decoding in one step.`,yt,L,se,Mt,Te,Ot=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to
computing decoding in one step.`,vt,E,ie,wt,Ie,Kt=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Tt,V,re,It,Ce,en=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.`,Ct,Je,le,Oe,me,Ke,y,pe,Jt,xe,tn="The Chroma pipeline for image-to-image generation.",xt,Ue,nn='Reference: <a href="https://huggingface.co/lodestones/Chroma1-HD/" rel="nofollow">https://huggingface.co/lodestones/Chroma1-HD/</a>',Ut,j,de,kt,ke,on="Function invoked when calling the pipeline for generation.",Zt,N,jt,H,ce,$t,Ze,an=`Disable sliced VAE decoding. If <code>enable_vae_slicing</code> was previously enabled, this method will go back to
computing decoding in one step.`,Gt,z,ge,Bt,je,sn=`Disable tiled VAE decoding. If <code>enable_vae_tiling</code> was previously enabled, this method will go back to
computing decoding in one step.`,Pt,D,he,Wt,$e,rn=`Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.`,Lt,X,fe,Et,Ge,ln=`Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.`,Vt,Be,ue,et,_e,tt,We,nt;return I=new bn({props:{containerStyle:"float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"}}),x=new Le({props:{title:"Chroma",local:"chroma",headingTag:"h1"}}),F=new Le({props:{title:"Inference",local:"inference",headingTag:"h2"}}),A=new dt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQ2hyb21hUGlwZWxpbmUlMEElMEFwaXBlJTIwJTNEJTIwQ2hyb21hUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKCUyMmxvZGVzdG9uZXMlMkZDaHJvbWExLUhEJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0R0b3JjaC5iZmxvYXQxNiklMEFwaXBlLmVuYWJsZV9tb2RlbF9jcHVfb2ZmbG9hZCgpJTBBJTBBcHJvbXB0JTIwJTNEJTIwJTVCJTBBJTIwJTIwJTIwJTIwJTIyQSUyMGhpZ2gtZmFzaGlvbiUyMGNsb3NlLXVwJTIwcG9ydHJhaXQlMjBvZiUyMGElMjBibG9uZGUlMjB3b21hbiUyMGluJTIwY2xlYXIlMjBzdW5nbGFzc2VzLiUyMFRoZSUyMGltYWdlJTIwdXNlcyUyMGElMjBib2xkJTIwdGVhbCUyMGFuZCUyMHJlZCUyMGNvbG9yJTIwc3BsaXQlMjBmb3IlMjBkcmFtYXRpYyUyMGxpZ2h0aW5nLiUyMFRoZSUyMGJhY2tncm91bmQlMjBpcyUyMGElMjBzaW1wbGUlMjB0ZWFsLWdyZWVuLiUyMFRoZSUyMHBob3RvJTIwaXMlMjBzaGFycCUyMGFuZCUyMHdlbGwtY29tcG9zZWQlMkMlMjBhbmQlMjBpcyUyMGRlc2lnbmVkJTIwZm9yJTIwdmlld2luZyUyMHdpdGglMjBhbmFnbHlwaCUyMDNEJTIwZ2xhc3NlcyUyMGZvciUyMG9wdGltYWwlMjBlZmZlY3QuJTIwSXQlMjBsb29rcyUyMHByb2Zlc3Npb25hbGx5JTIwZG9uZS4lMjIlMEElNUQlMEFuZWdhdGl2ZV9wcm9tcHQlMjAlM0QlMjAlMjAlNUIlMjJsb3clMjBxdWFsaXR5JTJDJTIwdWdseSUyQyUyMHVuZmluaXNoZWQlMkMlMjBvdXQlMjBvZiUyMGZvY3VzJTJDJTIwZGVmb3JtZWQlMkMlMjBkaXNmaWd1cmUlMkMlMjBibHVycnklMkMlMjBzbXVkZ2VkJTJDJTIwcmVzdHJpY3RlZCUyMHBhbGV0dGUlMkMlMjBmbGF0JTIwY29sb3JzJTIyJTVEJTBBJTBBaW1hZ2UlMjAlM0QlMjBwaXBlKCUwQSUyMCUyMCUyMCUyMHByb21wdCUzRHByb21wdCUyQyUwQSUyMCUyMCUyMCUyMG5lZ2F0aXZlX3Byb21wdCUzRG5lZ2F0aXZlX3Byb21wdCUyQyUwQSUyMCUyMCUyMCUyMGdlbmVyYXRvciUzRHRvcmNoLkdlbmVyYXRvciglMjJjcHUlMjIpLm1hbnVhbF9zZWVkKDQzMyklMkMlMEElMjAlMjAlMjAlMjBudW1faW5mZXJlbmNlX3N0ZXBzJTNENDAlMkMlMEElMjAlMjAlMjAlMjBndWlkYW5jZV9zY2FsZSUzRDMuMCUyQyUwQSUyMCUyMCUyMCUyMG51bV9pbWFnZXNfcGVyX3Byb21wdCUzRDElMkMlMEEpLmltYWdlcyU1QjAlNUQlMEFpbWFnZS5zYXZlKCUyMmNocm9tYS5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> ChromaPipeline
pipe = ChromaPipeline.from_pretrained(<span class="hljs-string">&quot;lodestones/Chroma1-HD&quot;</span>, torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
prompt = [
<span class="hljs-string">&quot;A high-fashion close-up portrait of a blonde woman in clear sunglasses. The image uses a bold teal and red color split for dramatic lighting. The background is a simple teal-green. The photo is sharp and well-composed, and is designed for viewing with anaglyph 3D glasses for optimal effect. It looks professionally done.&quot;</span>
]
negative_prompt = [<span class="hljs-string">&quot;low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors&quot;</span>]
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
generator=torch.Generator(<span class="hljs-string">&quot;cpu&quot;</span>).manual_seed(<span class="hljs-number">433</span>),
num_inference_steps=<span class="hljs-number">40</span>,
guidance_scale=<span class="hljs-number">3.0</span>,
num_images_per_prompt=<span class="hljs-number">1</span>,
).images[<span class="hljs-number">0</span>]
image.save(<span class="hljs-string">&quot;chroma.png&quot;</span>)`,wrap:!1}}),q=new Le({props:{title:"Loading from a single file",local:"loading-from-a-single-file",headingTag:"h2"}}),ee=new dt({props:{code:"aW1wb3J0JTIwdG9yY2glMEFmcm9tJTIwZGlmZnVzZXJzJTIwaW1wb3J0JTIwQ2hyb21hVHJhbnNmb3JtZXIyRE1vZGVsJTJDJTIwQ2hyb21hUGlwZWxpbmUlMEElMEFtb2RlbF9pZCUyMCUzRCUyMCUyMmxvZGVzdG9uZXMlMkZDaHJvbWExLUhEJTIyJTBBZHR5cGUlMjAlM0QlMjB0b3JjaC5iZmxvYXQxNiUwQSUwQXRyYW5zZm9ybWVyJTIwJTNEJTIwQ2hyb21hVHJhbnNmb3JtZXIyRE1vZGVsLmZyb21fc2luZ2xlX2ZpbGUoJTIyaHR0cHMlM0ElMkYlMkZodWdnaW5nZmFjZS5jbyUyRmxvZGVzdG9uZXMlMkZDaHJvbWExLUhEJTJGYmxvYiUyRm1haW4lMkZDaHJvbWExLUhELnNhZmV0ZW5zb3JzJTIyJTJDJTIwdG9yY2hfZHR5cGUlM0RkdHlwZSklMEElMEFwaXBlJTIwJTNEJTIwQ2hyb21hUGlwZWxpbmUuZnJvbV9wcmV0cmFpbmVkKG1vZGVsX2lkJTJDJTIwdHJhbnNmb3JtZXIlM0R0cmFuc2Zvcm1lciUyQyUyMHRvcmNoX2R0eXBlJTNEZHR5cGUpJTBBcGlwZS5lbmFibGVfbW9kZWxfY3B1X29mZmxvYWQoKSUwQSUwQXByb21wdCUyMCUzRCUyMCU1QiUwQSUyMCUyMCUyMCUyMCUyMkElMjBoaWdoLWZhc2hpb24lMjBjbG9zZS11cCUyMHBvcnRyYWl0JTIwb2YlMjBhJTIwYmxvbmRlJTIwd29tYW4lMjBpbiUyMGNsZWFyJTIwc3VuZ2xhc3Nlcy4lMjBUaGUlMjBpbWFnZSUyMHVzZXMlMjBhJTIwYm9sZCUyMHRlYWwlMjBhbmQlMjByZWQlMjBjb2xvciUyMHNwbGl0JTIwZm9yJTIwZHJhbWF0aWMlMjBsaWdodGluZy4lMjBUaGUlMjBiYWNrZ3JvdW5kJTIwaXMlMjBhJTIwc2ltcGxlJTIwdGVhbC1ncmVlbi4lMjBUaGUlMjBwaG90byUyMGlzJTIwc2hhcnAlMjBhbmQlMjB3ZWxsLWNvbXBvc2VkJTJDJTIwYW5kJTIwaXMlMjBkZXNpZ25lZCUyMGZvciUyMHZpZXdpbmclMjB3aXRoJTIwYW5hZ2x5cGglMjAzRCUyMGdsYXNzZXMlMjBmb3IlMjBvcHRpbWFsJTIwZWZmZWN0LiUyMEl0JTIwbG9va3MlMjBwcm9mZXNzaW9uYWxseSUyMGRvbmUuJTIyJTBBJTVEJTBBbmVnYXRpdmVfcHJvbXB0JTIwJTNEJTIwJTIwJTVCJTIybG93JTIwcXVhbGl0eSUyQyUyMHVnbHklMkMlMjB1bmZpbmlzaGVkJTJDJTIwb3V0JTIwb2YlMjBmb2N1cyUyQyUyMGRlZm9ybWVkJTJDJTIwZGlzZmlndXJlJTJDJTIwYmx1cnJ5JTJDJTIwc211ZGdlZCUyQyUyMHJlc3RyaWN0ZWQlMjBwYWxldHRlJTJDJTIwZmxhdCUyMGNvbG9ycyUyMiU1RCUwQSUwQWltYWdlJTIwJTNEJTIwcGlwZSglMEElMjAlMjAlMjAlMjBwcm9tcHQlM0Rwcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBuZWdhdGl2ZV9wcm9tcHQlM0RuZWdhdGl2ZV9wcm9tcHQlMkMlMEElMjAlMjAlMjAlMjBnZW5lcmF0b3IlM0R0b3JjaC5HZW5lcmF0b3IoJTIyY3B1JTIyKS5tYW51YWxfc2VlZCg0MzMpJTJDJTBBJTIwJTIwJTIwJTIwbnVtX2luZmVyZW5jZV9zdGVwcyUzRDQwJTJDJTBBJTIwJTIwJTIwJTIwZ3VpZGFuY2Vfc2NhbGUlM0QzLjAlMkMlMEEpLmltYWdlcyU1QjAlNUQlMEElMEFpbWFnZS5zYXZlKCUyMmNocm9tYS1zaW5nbGUtZmlsZS5wbmclMjIp",highlighted:`<span class="hljs-keyword">import</span> torch
<span class="hljs-keyword">from</span> diffusers <span class="hljs-keyword">import</span> ChromaTransformer2DModel, ChromaPipeline
model_id = <span class="hljs-string">&quot;lodestones/Chroma1-HD&quot;</span>
dtype = torch.bfloat16
transformer = ChromaTransformer2DModel.from_single_file(<span class="hljs-string">&quot;https://huggingface.co/lodestones/Chroma1-HD/blob/main/Chroma1-HD.safetensors&quot;</span>, torch_dtype=dtype)
pipe = ChromaPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=dtype)
pipe.enable_model_cpu_offload()
prompt = [
<span class="hljs-string">&quot;A high-fashion close-up portrait of a blonde woman in clear sunglasses. The image uses a bold teal and red color split for dramatic lighting. The background is a simple teal-green. The photo is sharp and well-composed, and is designed for viewing with anaglyph 3D glasses for optimal effect. It looks professionally done.&quot;</span>
]
negative_prompt = [<span class="hljs-string">&quot;low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors&quot;</span>]
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
generator=torch.Generator(<span class="hljs-string">&quot;cpu&quot;</span>).manual_seed(<span class="hljs-number">433</span>),
num_inference_steps=<span class="hljs-number">40</span>,
guidance_scale=<span class="hljs-number">3.0</span>,
).images[<span class="hljs-number">0</span>]
image.save(<span class="hljs-string">&quot;chroma-single-file.png&quot;</span>)`,wrap:!1}}),te=new Le({props:{title:"ChromaPipeline",local:"diffusers.ChromaPipeline",headingTag:"h2"}}),ne=new U({props:{name:"class diffusers.ChromaPipeline",anchor:"diffusers.ChromaPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": ChromaTransformer2DModel"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"}],parametersDescription:[{anchor:"diffusers.ChromaPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12747/en/api/models/chroma_transformer#diffusers.ChromaTransformer2DModel">ChromaTransformer2DModel</a>) &#x2014;
Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.ChromaPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12747/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) &#x2014;
A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.ChromaPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12747/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representation`,name:"vae"},{anchor:"diffusers.ChromaPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) &#x2014;
<a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically
the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.ChromaPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) &#x2014;
Second Tokenizer of class
<a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L151"}}),oe=new U({props:{name:"__call__",anchor:"diffusers.ChromaPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 35"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 5.0"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"negative_ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.ChromaPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.ChromaPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.ChromaPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.ChromaPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.ChromaPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 50) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.ChromaPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.ChromaPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 3.5) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.ChromaPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.ChromaPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.ChromaPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.ChromaPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.ChromaPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.ChromaPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.ChromaPipeline.__call__.negative_ip_adapter_image",description:`<strong>negative_ip_adapter_image</strong> &#x2014;
(<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_ip_adapter_image"},{anchor:"diffusers.ChromaPipeline.__call__.negative_ip_adapter_image_embeds",description:`<strong>negative_ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"negative_ip_adapter_image_embeds"},{anchor:"diffusers.ChromaPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.ChromaPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (torch.Tensor, <em>optional</em>) &#x2014;
Attention mask for the prompt embeddings. Used to mask out padding tokens in the prompt sequence.
Chroma requires a single padding token remain unmasked. Please refer to
<a href="https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training" rel="nofollow">https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training</a>`,name:"prompt_attention_mask"},{anchor:"diffusers.ChromaPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (torch.Tensor, <em>optional</em>) &#x2014;
Attention mask for the negative prompt embeddings. Used to mask out padding tokens in the negative
prompt sequence. Chroma requires a single padding token remain unmasked. PLease refer to
<a href="https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training" rel="nofollow">https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training</a>`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.ChromaPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.ChromaPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.flux.ChromaPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.ChromaPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.ChromaPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by
<code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.ChromaPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.ChromaPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) &#x2014; Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L641",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.chroma.ChromaPipelineOutput</code> if
<code>return_dict</code> is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the
generated images.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.chroma.ChromaPipelineOutput</code> or <code>tuple</code></p>
`}}),P=new dn({props:{anchor:"diffusers.ChromaPipeline.__call__.example",$$slots:{default:[Mn]},$$scope:{ctx:be}}}),ae=new U({props:{name:"disable_vae_slicing",anchor:"diffusers.ChromaPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L523"}}),se=new U({props:{name:"disable_vae_tiling",anchor:"diffusers.ChromaPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L550"}}),ie=new U({props:{name:"enable_vae_slicing",anchor:"diffusers.ChromaPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L510"}}),re=new U({props:{name:"enable_vae_tiling",anchor:"diffusers.ChromaPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L536"}}),le=new U({props:{name:"encode_prompt",anchor:"diffusers.ChromaPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.ChromaPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.ChromaPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt not to guide the image generation. If not defined, one has to pass <code>negative_prompt_embeds</code>
instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.ChromaPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>):
torch device`,name:"device"},{anchor:"diffusers.ChromaPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.ChromaPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.ChromaPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma.py#L265"}}),me=new Le({props:{title:"ChromaImg2ImgPipeline",local:"diffusers.ChromaImg2ImgPipeline",headingTag:"h2"}}),pe=new U({props:{name:"class diffusers.ChromaImg2ImgPipeline",anchor:"diffusers.ChromaImg2ImgPipeline",parameters:[{name:"scheduler",val:": FlowMatchEulerDiscreteScheduler"},{name:"vae",val:": AutoencoderKL"},{name:"text_encoder",val:": T5EncoderModel"},{name:"tokenizer",val:": T5TokenizerFast"},{name:"transformer",val:": ChromaTransformer2DModel"},{name:"image_encoder",val:": CLIPVisionModelWithProjection = None"},{name:"feature_extractor",val:": CLIPImageProcessor = None"}],parametersDescription:[{anchor:"diffusers.ChromaImg2ImgPipeline.transformer",description:`<strong>transformer</strong> (<a href="/docs/diffusers/pr_12747/en/api/models/chroma_transformer#diffusers.ChromaTransformer2DModel">ChromaTransformer2DModel</a>) &#x2014;
Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.`,name:"transformer"},{anchor:"diffusers.ChromaImg2ImgPipeline.scheduler",description:`<strong>scheduler</strong> (<a href="/docs/diffusers/pr_12747/en/api/schedulers/flow_match_euler_discrete#diffusers.FlowMatchEulerDiscreteScheduler">FlowMatchEulerDiscreteScheduler</a>) &#x2014;
A scheduler to be used in combination with <code>transformer</code> to denoise the encoded image latents.`,name:"scheduler"},{anchor:"diffusers.ChromaImg2ImgPipeline.vae",description:`<strong>vae</strong> (<a href="/docs/diffusers/pr_12747/en/api/models/autoencoderkl#diffusers.AutoencoderKL">AutoencoderKL</a>) &#x2014;
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representation`,name:"vae"},{anchor:"diffusers.ChromaImg2ImgPipeline.text_encoder",description:`<strong>text_encoder</strong> (<code>T5EncoderModel</code>) &#x2014;
<a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel" rel="nofollow">T5</a>, specifically
the <a href="https://huggingface.co/google/t5-v1_1-xxl" rel="nofollow">google/t5-v1_1-xxl</a> variant.`,name:"text_encoder"},{anchor:"diffusers.ChromaImg2ImgPipeline.tokenizer",description:`<strong>tokenizer</strong> (<code>T5TokenizerFast</code>) &#x2014;
Second Tokenizer of class
<a href="https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast" rel="nofollow">T5TokenizerFast</a>.`,name:"tokenizer"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L163"}}),de=new U({props:{name:"__call__",anchor:"diffusers.ChromaImg2ImgPipeline.__call__",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor]] = None"},{name:"height",val:": typing.Optional[int] = None"},{name:"width",val:": typing.Optional[int] = None"},{name:"num_inference_steps",val:": int = 35"},{name:"sigmas",val:": typing.Optional[typing.List[float]] = None"},{name:"guidance_scale",val:": float = 5.0"},{name:"strength",val:": float = 0.9"},{name:"num_images_per_prompt",val:": typing.Optional[int] = 1"},{name:"generator",val:": typing.Union[torch._C.Generator, typing.List[torch._C.Generator], NoneType] = None"},{name:"latents",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_ip_adapter_image",val:": typing.Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, typing.List[PIL.Image.Image], typing.List[numpy.ndarray], typing.List[torch.Tensor], NoneType] = None"},{name:"negative_ip_adapter_image_embeds",val:": typing.Optional[typing.List[torch.Tensor]] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[<built-in method tensor of type object at 0x7fd2435836a0>] = None"},{name:"output_type",val:": typing.Optional[str] = 'pil'"},{name:"return_dict",val:": bool = True"},{name:"joint_attention_kwargs",val:": typing.Optional[typing.Dict[str, typing.Any]] = None"},{name:"callback_on_step_end",val:": typing.Optional[typing.Callable[[int, int, typing.Dict], NoneType]] = None"},{name:"callback_on_step_end_tensor_inputs",val:": typing.List[str] = ['latents']"},{name:"max_sequence_length",val:": int = 512"}],parametersDescription:[{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts to guide the image generation. If not defined, one has to pass <code>prompt_embeds</code>.
instead.`,name:"prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt or prompts not to guide the image generation. If not defined, one has to pass
<code>negative_prompt_embeds</code> instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is
not greater than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.height",description:`<strong>height</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The height in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"height"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.width",description:`<strong>width</strong> (<code>int</code>, <em>optional</em>, defaults to self.unet.config.sample_size * self.vae_scale_factor) &#x2014;
The width in pixels of the generated image. This is set to 1024 by default for the best results.`,name:"width"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.num_inference_steps",description:`<strong>num_inference_steps</strong> (<code>int</code>, <em>optional</em>, defaults to 35) &#x2014;
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.`,name:"num_inference_steps"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.sigmas",description:`<strong>sigmas</strong> (<code>List[float]</code>, <em>optional</em>) &#x2014;
Custom sigmas to use for the denoising process with schedulers which support a <code>sigmas</code> argument in
their <code>set_timesteps</code> method. If not defined, the default behavior when <code>num_inference_steps</code> is passed
will be used.`,name:"sigmas"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.guidance_scale",description:`<strong>guidance_scale</strong> (<code>float</code>, <em>optional</em>, defaults to 3.5) &#x2014;
Guidance scale as defined in <a href="https://huggingface.co/papers/2207.12598" rel="nofollow">Classifier-Free Diffusion
Guidance</a>. <code>guidance_scale</code> is defined as <code>w</code> of equation 2.
of <a href="https://huggingface.co/papers/2205.11487" rel="nofollow">Imagen Paper</a>. Guidance scale is enabled by setting
<code>guidance_scale &gt; 1</code>. Higher guidance scale encourages to generate images that are closely linked to
the text <code>prompt</code>, usually at the expense of lower image quality.`,name:"guidance_scale"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.strength",description:`<strong>strength</strong> (\`float, <em>optional</em>, defaults to 0.9) &#x2014;
Conceptually, indicates how much to transform the reference image. Must be between 0 and 1. image will
be used as a starting point, adding more noise to it the larger the strength. The number of denoising
steps depends on the amount of noise initially added. When strength is 1, added noise will be maximum
and the denoising process will run for the full number of iterations specified in num_inference_steps.
A value of 1, therefore, essentially ignores image.`,name:"strength"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>, <em>optional</em>, defaults to 1) &#x2014;
The number of images to generate per prompt.`,name:"num_images_per_prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.generator",description:`<strong>generator</strong> (<code>torch.Generator</code> or <code>List[torch.Generator]</code>, <em>optional</em>) &#x2014;
One or a list of <a href="https://pytorch.org/docs/stable/generated/torch.Generator.html" rel="nofollow">torch generator(s)</a>
to make generation deterministic.`,name:"generator"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.latents",description:`<strong>latents</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random <code>generator</code>.`,name:"latents"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.ip_adapter_image",description:"<strong>ip_adapter_image</strong> &#x2014; (<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.",name:"ip_adapter_image"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.ip_adapter_image_embeds",description:`<strong>ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"ip_adapter_image_embeds"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.negative_ip_adapter_image",description:`<strong>negative_ip_adapter_image</strong> &#x2014;
(<code>PipelineImageInput</code>, <em>optional</em>): Optional image input to work with IP Adapters.`,name:"negative_ip_adapter_image"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.negative_ip_adapter_image_embeds",description:`<strong>negative_ip_adapter_image_embeds</strong> (<code>List[torch.Tensor]</code>, <em>optional</em>) &#x2014;
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape <code>(batch_size, num_images, emb_dim)</code>. If not
provided, embeddings are computed from the <code>ip_adapter_image</code> input argument.`,name:"negative_ip_adapter_image_embeds"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.negative_prompt_embeds",description:`<strong>negative_prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt
weighting. If not provided, negative_prompt_embeds will be generated from <code>negative_prompt</code> input
argument.`,name:"negative_prompt_embeds"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.prompt_attention_mask",description:`<strong>prompt_attention_mask</strong> (torch.Tensor, <em>optional</em>) &#x2014;
Attention mask for the prompt embeddings. Used to mask out padding tokens in the prompt sequence.
Chroma requires a single padding token remain unmasked. Please refer to
<a href="https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training" rel="nofollow">https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training</a>`,name:"prompt_attention_mask"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.negative_prompt_attention_mask",description:`<strong>negative_prompt_attention_mask</strong> (torch.Tensor, <em>optional</em>) &#x2014;
Attention mask for the negative prompt embeddings. Used to mask out padding tokens in the negative
prompt sequence. Chroma requires a single padding token remain unmasked. PLease refer to
<a href="https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training" rel="nofollow">https://huggingface.co/lodestones/Chroma#tldr-masking-t5-padding-tokens-enhanced-fidelity-and-increased-stability-during-training</a>`,name:"negative_prompt_attention_mask"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.output_type",description:`<strong>output_type</strong> (<code>str</code>, <em>optional</em>, defaults to <code>&quot;pil&quot;</code>) &#x2014;
The output format of the generate image. Choose between
<a href="https://pillow.readthedocs.io/en/stable/" rel="nofollow">PIL</a>: <code>PIL.Image.Image</code> or <code>np.array</code>.`,name:"output_type"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.return_dict",description:`<strong>return_dict</strong> (<code>bool</code>, <em>optional</em>, defaults to <code>True</code>) &#x2014;
Whether or not to return a <code>~pipelines.flux.ChromaPipelineOutput</code> instead of a plain tuple.`,name:"return_dict"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.joint_attention_kwargs",description:`<strong>joint_attention_kwargs</strong> (<code>dict</code>, <em>optional</em>) &#x2014;
A kwargs dictionary that if specified is passed along to the <code>AttentionProcessor</code> as defined under
<code>self.processor</code> in
<a href="https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py" rel="nofollow">diffusers.models.attention_processor</a>.`,name:"joint_attention_kwargs"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.callback_on_step_end",description:`<strong>callback_on_step_end</strong> (<code>Callable</code>, <em>optional</em>) &#x2014;
A function that calls at the end of each denoising steps during the inference. The function is called
with the following arguments: <code>callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)</code>. <code>callback_kwargs</code> will include a list of all tensors as specified by
<code>callback_on_step_end_tensor_inputs</code>.`,name:"callback_on_step_end"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.callback_on_step_end_tensor_inputs",description:`<strong>callback_on_step_end_tensor_inputs</strong> (<code>List</code>, <em>optional</em>) &#x2014;
The list of tensor inputs for the <code>callback_on_step_end</code> function. The tensors specified in the list
will be passed as <code>callback_kwargs</code> argument. You will only be able to include variables listed in the
<code>._callback_tensor_inputs</code> attribute of your pipeline class.`,name:"callback_on_step_end_tensor_inputs"},{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.max_sequence_length",description:"<strong>max_sequence_length</strong> (<code>int</code> defaults to 512) &#x2014; Maximum sequence length to use with the <code>prompt</code>.",name:"max_sequence_length"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L700",returnDescription:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.chroma.ChromaPipelineOutput</code> if
<code>return_dict</code> is True, otherwise a <code>tuple</code>. When returning a tuple, the first element is a list with the
generated images.</p>
`,returnType:`<script context="module">export const metadata = 'undefined';<\/script>
<p><code>~pipelines.chroma.ChromaPipelineOutput</code> or <code>tuple</code></p>
`}}),N=new dn({props:{anchor:"diffusers.ChromaImg2ImgPipeline.__call__.example",$$slots:{default:[vn]},$$scope:{ctx:be}}}),ce=new U({props:{name:"disable_vae_slicing",anchor:"diffusers.ChromaImg2ImgPipeline.disable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L555"}}),ge=new U({props:{name:"disable_vae_tiling",anchor:"diffusers.ChromaImg2ImgPipeline.disable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L582"}}),he=new U({props:{name:"enable_vae_slicing",anchor:"diffusers.ChromaImg2ImgPipeline.enable_vae_slicing",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L542"}}),fe=new U({props:{name:"enable_vae_tiling",anchor:"diffusers.ChromaImg2ImgPipeline.enable_vae_tiling",parameters:[],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L568"}}),ue=new U({props:{name:"encode_prompt",anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt",parameters:[{name:"prompt",val:": typing.Union[str, typing.List[str]]"},{name:"negative_prompt",val:": typing.Union[str, typing.List[str]] = None"},{name:"device",val:": typing.Optional[torch.device] = None"},{name:"num_images_per_prompt",val:": int = 1"},{name:"prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_embeds",val:": typing.Optional[torch.Tensor] = None"},{name:"prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"negative_prompt_attention_mask",val:": typing.Optional[torch.Tensor] = None"},{name:"do_classifier_free_guidance",val:": bool = True"},{name:"max_sequence_length",val:": int = 512"},{name:"lora_scale",val:": typing.Optional[float] = None"}],parametersDescription:[{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.prompt",description:`<strong>prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
prompt to be encoded`,name:"prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.negative_prompt",description:`<strong>negative_prompt</strong> (<code>str</code> or <code>List[str]</code>, <em>optional</em>) &#x2014;
The prompt not to guide the image generation. If not defined, one has to pass <code>negative_prompt_embeds</code>
instead. Ignored when not using guidance (i.e., ignored if <code>guidance_scale</code> is less than <code>1</code>).`,name:"negative_prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.device",description:`<strong>device</strong> &#x2014; (<code>torch.device</code>):
torch device`,name:"device"},{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.num_images_per_prompt",description:`<strong>num_images_per_prompt</strong> (<code>int</code>) &#x2014;
number of images that should be generated per prompt`,name:"num_images_per_prompt"},{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.prompt_embeds",description:`<strong>prompt_embeds</strong> (<code>torch.Tensor</code>, <em>optional</em>) &#x2014;
Pre-generated text embeddings. Can be used to easily tweak text inputs, <em>e.g.</em> prompt weighting. If not
provided, text embeddings will be generated from <code>prompt</code> input argument.`,name:"prompt_embeds"},{anchor:"diffusers.ChromaImg2ImgPipeline.encode_prompt.lora_scale",description:`<strong>lora_scale</strong> (<code>float</code>, <em>optional</em>) &#x2014;
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.`,name:"lora_scale"}],source:"https://github.com/huggingface/diffusers/blob/vr_12747/src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py#L292"}}),_e=new yn({props:{source:"https://github.com/huggingface/diffusers/blob/main/docs/source/en/api/pipelines/chroma.md"}}),{c(){u=i("meta"),k=o(),J=i("p"),v=o(),p(I.$$.fragment),m=o(),p(x.$$.fragment),Ee=o(),G=i("div"),G.innerHTML=Nt,Ve=o(),Q=i("p"),Q.textContent=Ht,Ne=o(),R=i("p"),R.textContent=zt,He=o(),Y=i("ul"),Y.innerHTML=Dt,ze=o(),B=i("blockquote"),B.innerHTML=Xt,De=o(),p(F.$$.fragment),Xe=o(),p(A.$$.fragment),Qe=o(),p(q.$$.fragment),Re=o(),S=i("p"),S.innerHTML=Qt,Ye=o(),O=i("p"),O.textContent=Rt,Fe=o(),K=i("p"),K.textContent=Yt,Ae=o(),p(ee.$$.fragment),qe=o(),p(te.$$.fragment),Se=o(),b=i("div"),p(ne.$$.fragment),ct=o(),ye=i("p"),ye.textContent=Ft,gt=o(),Me=i("p"),Me.innerHTML=At,ht=o(),Z=i("div"),p(oe.$$.fragment),ft=o(),ve=i("p"),ve.textContent=qt,ut=o(),p(P.$$.fragment),_t=o(),W=i("div"),p(ae.$$.fragment),bt=o(),we=i("p"),we.innerHTML=St,yt=o(),L=i("div"),p(se.$$.fragment),Mt=o(),Te=i("p"),Te.innerHTML=Ot,vt=o(),E=i("div"),p(ie.$$.fragment),wt=o(),Ie=i("p"),Ie.textContent=Kt,Tt=o(),V=i("div"),p(re.$$.fragment),It=o(),Ce=i("p"),Ce.textContent=en,Ct=o(),Je=i("div"),p(le.$$.fragment),Oe=o(),p(me.$$.fragment),Ke=o(),y=i("div"),p(pe.$$.fragment),Jt=o(),xe=i("p"),xe.textContent=tn,xt=o(),Ue=i("p"),Ue.innerHTML=nn,Ut=o(),j=i("div"),p(de.$$.fragment),kt=o(),ke=i("p"),ke.textContent=on,Zt=o(),p(N.$$.fragment),jt=o(),H=i("div"),p(ce.$$.fragment),$t=o(),Ze=i("p"),Ze.innerHTML=an,Gt=o(),z=i("div"),p(ge.$$.fragment),Bt=o(),je=i("p"),je.innerHTML=sn,Pt=o(),D=i("div"),p(he.$$.fragment),Wt=o(),$e=i("p"),$e.textContent=rn,Lt=o(),X=i("div"),p(fe.$$.fragment),Et=o(),Ge=i("p"),Ge.textContent=ln,Vt=o(),Be=i("div"),p(ue.$$.fragment),et=o(),p(_e.$$.fragment),tt=o(),We=i("p"),this.h()},l(e){const s=_n("svelte-u9bgzb",document.head);u=r(s,"META",{name:!0,content:!0}),s.forEach(n),k=a(e),J=r(e,"P",{}),C(J).forEach(n),v=a(e),d(I.$$.fragment,e),m=a(e),d(x.$$.fragment,e),Ee=a(e),G=r(e,"DIV",{class:!0,"data-svelte-h":!0}),_(G)!=="svelte-1elo7hh"&&(G.innerHTML=Nt),Ve=a(e),Q=r(e,"P",{"data-svelte-h":!0}),_(Q)!=="svelte-zz935e"&&(Q.textContent=Ht),Ne=a(e),R=r(e,"P",{"data-svelte-h":!0}),_(R)!=="svelte-9mb6m9"&&(R.textContent=zt),He=a(e),Y=r(e,"UL",{"data-svelte-h":!0}),_(Y)!=="svelte-1mx6of9"&&(Y.innerHTML=Dt),ze=a(e),B=r(e,"BLOCKQUOTE",{class:!0,"data-svelte-h":!0}),_(B)!=="svelte-1hm6mkx"&&(B.innerHTML=Xt),De=a(e),d(F.$$.fragment,e),Xe=a(e),d(A.$$.fragment,e),Qe=a(e),d(q.$$.fragment,e),Re=a(e),S=r(e,"P",{"data-svelte-h":!0}),_(S)!=="svelte-1dlqcbc"&&(S.innerHTML=Qt),Ye=a(e),O=r(e,"P",{"data-svelte-h":!0}),_(O)!=="svelte-1fix8nw"&&(O.textContent=Rt),Fe=a(e),K=r(e,"P",{"data-svelte-h":!0}),_(K)!=="svelte-15rpvn4"&&(K.textContent=Yt),Ae=a(e),d(ee.$$.fragment,e),qe=a(e),d(te.$$.fragment,e),Se=a(e),b=r(e,"DIV",{class:!0});var M=C(b);d(ne.$$.fragment,M),ct=a(M),ye=r(M,"P",{"data-svelte-h":!0}),_(ye)!=="svelte-x23rez"&&(ye.textContent=Ft),gt=a(M),Me=r(M,"P",{"data-svelte-h":!0}),_(Me)!=="svelte-au8uhu"&&(Me.innerHTML=At),ht=a(M),Z=r(M,"DIV",{class:!0});var $=C(Z);d(oe.$$.fragment,$),ft=a($),ve=r($,"P",{"data-svelte-h":!0}),_(ve)!=="svelte-v78lg8"&&(ve.textContent=qt),ut=a($),d(P.$$.fragment,$),$.forEach(n),_t=a(M),W=r(M,"DIV",{class:!0});var ot=C(W);d(ae.$$.fragment,ot),bt=a(ot),we=r(ot,"P",{"data-svelte-h":!0}),_(we)!=="svelte-1s3c06i"&&(we.innerHTML=St),ot.forEach(n),yt=a(M),L=r(M,"DIV",{class:!0});var at=C(L);d(se.$$.fragment,at),Mt=a(at),Te=r(at,"P",{"data-svelte-h":!0}),_(Te)!=="svelte-pkn4ui"&&(Te.innerHTML=Ot),at.forEach(n),vt=a(M),E=r(M,"DIV",{class:!0});var st=C(E);d(ie.$$.fragment,st),wt=a(st),Ie=r(st,"P",{"data-svelte-h":!0}),_(Ie)!=="svelte-14bnrb6"&&(Ie.textContent=Kt),st.forEach(n),Tt=a(M),V=r(M,"DIV",{class:!0});var it=C(V);d(re.$$.fragment,it),It=a(it),Ce=r(it,"P",{"data-svelte-h":!0}),_(Ce)!=="svelte-1xwrf7t"&&(Ce.textContent=en),it.forEach(n),Ct=a(M),Je=r(M,"DIV",{class:!0});var mn=C(Je);d(le.$$.fragment,mn),mn.forEach(n),M.forEach(n),Oe=a(e),d(me.$$.fragment,e),Ke=a(e),y=r(e,"DIV",{class:!0});var w=C(y);d(pe.$$.fragment,w),Jt=a(w),xe=r(w,"P",{"data-svelte-h":!0}),_(xe)!=="svelte-1mk0zn"&&(xe.textContent=tn),xt=a(w),Ue=r(w,"P",{"data-svelte-h":!0}),_(Ue)!=="svelte-au8uhu"&&(Ue.innerHTML=nn),Ut=a(w),j=r(w,"DIV",{class:!0});var Pe=C(j);d(de.$$.fragment,Pe),kt=a(Pe),ke=r(Pe,"P",{"data-svelte-h":!0}),_(ke)!=="svelte-v78lg8"&&(ke.textContent=on),Zt=a(Pe),d(N.$$.fragment,Pe),Pe.forEach(n),jt=a(w),H=r(w,"DIV",{class:!0});var rt=C(H);d(ce.$$.fragment,rt),$t=a(rt),Ze=r(rt,"P",{"data-svelte-h":!0}),_(Ze)!=="svelte-1s3c06i"&&(Ze.innerHTML=an),rt.forEach(n),Gt=a(w),z=r(w,"DIV",{class:!0});var lt=C(z);d(ge.$$.fragment,lt),Bt=a(lt),je=r(lt,"P",{"data-svelte-h":!0}),_(je)!=="svelte-pkn4ui"&&(je.innerHTML=sn),lt.forEach(n),Pt=a(w),D=r(w,"DIV",{class:!0});var mt=C(D);d(he.$$.fragment,mt),Wt=a(mt),$e=r(mt,"P",{"data-svelte-h":!0}),_($e)!=="svelte-14bnrb6"&&($e.textContent=rn),mt.forEach(n),Lt=a(w),X=r(w,"DIV",{class:!0});var pt=C(X);d(fe.$$.fragment,pt),Et=a(pt),Ge=r(pt,"P",{"data-svelte-h":!0}),_(Ge)!=="svelte-1xwrf7t"&&(Ge.textContent=ln),pt.forEach(n),Vt=a(w),Be=r(w,"DIV",{class:!0});var pn=C(Be);d(ue.$$.fragment,pn),pn.forEach(n),w.forEach(n),et=a(e),d(_e.$$.fragment,e),tt=a(e),We=r(e,"P",{}),C(We).forEach(n),this.h()},h(){T(u,"name","hf:doc:metadata"),T(u,"content",Tn),T(G,"class","flex flex-wrap space-x-1"),T(B,"class","tip"),T(Z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(W,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(L,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(E,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(V,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(Je,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(b,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(j,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(H,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(z,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(D,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(X,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(Be,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8"),T(y,"class","docstring border-l-2 border-t-2 pl-4 pt-3.5 border-gray-100 rounded-tl-xl mb-6 mt-8")},m(e,s){t(document.head,u),l(e,k,s),l(e,J,s),l(e,v,s),c(I,e,s),l(e,m,s),c(x,e,s),l(e,Ee,s),l(e,G,s),l(e,Ve,s),l(e,Q,s),l(e,Ne,s),l(e,R,s),l(e,He,s),l(e,Y,s),l(e,ze,s),l(e,B,s),l(e,De,s),c(F,e,s),l(e,Xe,s),c(A,e,s),l(e,Qe,s),c(q,e,s),l(e,Re,s),l(e,S,s),l(e,Ye,s),l(e,O,s),l(e,Fe,s),l(e,K,s),l(e,Ae,s),c(ee,e,s),l(e,qe,s),c(te,e,s),l(e,Se,s),l(e,b,s),c(ne,b,null),t(b,ct),t(b,ye),t(b,gt),t(b,Me),t(b,ht),t(b,Z),c(oe,Z,null),t(Z,ft),t(Z,ve),t(Z,ut),c(P,Z,null),t(b,_t),t(b,W),c(ae,W,null),t(W,bt),t(W,we),t(b,yt),t(b,L),c(se,L,null),t(L,Mt),t(L,Te),t(b,vt),t(b,E),c(ie,E,null),t(E,wt),t(E,Ie),t(b,Tt),t(b,V),c(re,V,null),t(V,It),t(V,Ce),t(b,Ct),t(b,Je),c(le,Je,null),l(e,Oe,s),c(me,e,s),l(e,Ke,s),l(e,y,s),c(pe,y,null),t(y,Jt),t(y,xe),t(y,xt),t(y,Ue),t(y,Ut),t(y,j),c(de,j,null),t(j,kt),t(j,ke),t(j,Zt),c(N,j,null),t(y,jt),t(y,H),c(ce,H,null),t(H,$t),t(H,Ze),t(y,Gt),t(y,z),c(ge,z,null),t(z,Bt),t(z,je),t(y,Pt),t(y,D),c(he,D,null),t(D,Wt),t(D,$e),t(y,Lt),t(y,X),c(fe,X,null),t(X,Et),t(X,Ge),t(y,Vt),t(y,Be),c(ue,Be,null),l(e,et,s),c(_e,e,s),l(e,tt,s),l(e,We,s),nt=!0},p(e,[s]){const M={};s&2&&(M.$$scope={dirty:s,ctx:e}),P.$set(M);const $={};s&2&&($.$$scope={dirty:s,ctx:e}),N.$set($)},i(e){nt||(g(I.$$.fragment,e),g(x.$$.fragment,e),g(F.$$.fragment,e),g(A.$$.fragment,e),g(q.$$.fragment,e),g(ee.$$.fragment,e),g(te.$$.fragment,e),g(ne.$$.fragment,e),g(oe.$$.fragment,e),g(P.$$.fragment,e),g(ae.$$.fragment,e),g(se.$$.fragment,e),g(ie.$$.fragment,e),g(re.$$.fragment,e),g(le.$$.fragment,e),g(me.$$.fragment,e),g(pe.$$.fragment,e),g(de.$$.fragment,e),g(N.$$.fragment,e),g(ce.$$.fragment,e),g(ge.$$.fragment,e),g(he.$$.fragment,e),g(fe.$$.fragment,e),g(ue.$$.fragment,e),g(_e.$$.fragment,e),nt=!0)},o(e){h(I.$$.fragment,e),h(x.$$.fragment,e),h(F.$$.fragment,e),h(A.$$.fragment,e),h(q.$$.fragment,e),h(ee.$$.fragment,e),h(te.$$.fragment,e),h(ne.$$.fragment,e),h(oe.$$.fragment,e),h(P.$$.fragment,e),h(ae.$$.fragment,e),h(se.$$.fragment,e),h(ie.$$.fragment,e),h(re.$$.fragment,e),h(le.$$.fragment,e),h(me.$$.fragment,e),h(pe.$$.fragment,e),h(de.$$.fragment,e),h(N.$$.fragment,e),h(ce.$$.fragment,e),h(ge.$$.fragment,e),h(he.$$.fragment,e),h(fe.$$.fragment,e),h(ue.$$.fragment,e),h(_e.$$.fragment,e),nt=!1},d(e){e&&(n(k),n(J),n(v),n(m),n(Ee),n(G),n(Ve),n(Q),n(Ne),n(R),n(He),n(Y),n(ze),n(B),n(De),n(Xe),n(Qe),n(Re),n(S),n(Ye),n(O),n(Fe),n(K),n(Ae),n(qe),n(Se),n(b),n(Oe),n(Ke),n(y),n(et),n(tt),n(We)),n(u),f(I,e),f(x,e),f(F,e),f(A,e),f(q,e),f(ee,e),f(te,e),f(ne),f(oe),f(P),f(ae),f(se),f(ie),f(re),f(le),f(me,e),f(pe),f(de),f(N),f(ce),f(ge),f(he),f(fe),f(ue),f(_e,e)}}}const Tn='{"title":"Chroma","local":"chroma","sections":[{"title":"Inference","local":"inference","sections":[],"depth":2},{"title":"Loading from a single file","local":"loading-from-a-single-file","sections":[],"depth":2},{"title":"ChromaPipeline","local":"diffusers.ChromaPipeline","sections":[],"depth":2},{"title":"ChromaImg2ImgPipeline","local":"diffusers.ChromaImg2ImgPipeline","sections":[],"depth":2}],"depth":1}';function In(be){return hn(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class $n extends fn{constructor(u){super(),un(this,u,In,wn,gn,{})}}export{$n as component};

Xet Storage Details

Size:
68.7 kB
·
Xet hash:
fd349cc666c8121c05f2af618304a3a75f6d02af5db5027eff5b78c01bcc395d

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.