# Distilled Stable Diffusion inference

Stable Diffusion inference can be a computationally intensive process because it must iteratively denoise the latents to generate an image. To reduce the computational burden, you can use a *distilled* version of the Stable Diffusion model from [Nota AI](https://huggingface.co/nota-ai). The distilled version of their Stable Diffusion model eliminates some of the residual and attention blocks from the UNet, reducing the model size by 51% and improving latency on CPU/GPU by 43%.

<Tip>

Read this [blog post](https://huggingface.co/blog/sd_distillation) to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.

</Tip>

Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model:

```py
from diffusers import StableDiffusionPipeline
import torch

distilled = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")

original = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```
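To see where the 51% size reduction comes from, you can compare the UNet parameter counts of the two pipelines. This is a quick sanity check rather than part of the original guide, and the exact numbers depend on the checkpoints:

```py
# Compare UNet sizes; the distilled UNet drops several residual and attention blocks.
def count_params(module):
    return sum(p.numel() for p in module.parameters())

print(f"original UNet:  {count_params(original.unet) / 1e6:.1f}M parameters")
print(f"distilled UNet: {count_params(distilled.unet) / 1e6:.1f}M parameters")
```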
Given a prompt, get the inference time for the original model:

```py
import time

seed = 2023
generator = torch.manual_seed(seed)

NUM_ITERS_TO_RUN = 3
NUM_INFERENCE_STEPS = 25
NUM_IMAGES_PER_PROMPT = 4

prompt = "a golden vase with different flowers"

start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = original(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()
original_sd = f"{(end - start) / 1e6:.1f}"

print(f"Execution time -- {original_sd} ms\n")
"Execution time -- 45781.5 ms"
```
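Note that the same `generator` is reused for every benchmark below. If you want each pipeline to denoise from exactly the same starting noise, you can re-seed it before each run; this is an optional tweak rather than part of the original comparison:

```py
# Optional: re-create the generator so the next benchmark starts from the same noise.
generator = torch.manual_seed(seed)
```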
Time the distilled model inference:

```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = distilled(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()

distilled_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_sd} ms\n")
"Execution time -- 29884.2 ms"
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/original_sd.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">original Stable Diffusion (45781.5 ms)</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion (29884.2 ms)</figcaption>
  </div>
</div>
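The comparison images above come from the `images` list each pipeline returns. If you want to reproduce a similar side-by-side comparison locally, you can save the last batch of outputs; the filenames here are just an example:

```py
# Save the last batch of images from the distilled run for visual inspection.
for i, image in enumerate(images):
    image.save(f"distilled_{i}.png")  # example filenames, adjust as needed
```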
## Tiny AutoEncoder

To speed inference up even more, use a tiny distilled version of the [Stable Diffusion VAE](https://huggingface.co/sayakpaul/taesdxl-diffusers) to denoise the latents into images. Replace the VAE in the distilled Stable Diffusion model with the tiny VAE:

```py
from diffusers import AutoencoderTiny

distilled.vae = AutoencoderTiny.from_pretrained(
    "sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
).to("cuda")
```
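The tiny autoencoder is much smaller than the standard Stable Diffusion VAE. As a quick, checkpoint-dependent sanity check (not part of the original guide), you can print its parameter count after the swap:

```py
# Rough size check of the swapped-in tiny VAE.
vae_params = sum(p.numel() for p in distilled.vae.parameters())
print(f"tiny VAE: {vae_params / 1e6:.1f}M parameters")
```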
Time the distilled model and distilled VAE inference:

```py
start = time.time_ns()
for _ in range(NUM_ITERS_TO_RUN):
    images = distilled(
        prompt,
        num_inference_steps=NUM_INFERENCE_STEPS,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
    ).images
end = time.time_ns()

distilled_tiny_sd = f"{(end - start) / 1e6:.1f}"
print(f"Execution time -- {distilled_tiny_sd} ms\n")
"Execution time -- 27165.7 ms"
```

<div class="flex justify-center">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/distilled_sd_vae.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">distilled Stable Diffusion + Tiny AutoEncoder (27165.7 ms)</figcaption>
  </div>
</div>
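The distilled pipeline with the tiny VAE is a drop-in replacement for the original pipeline, so single-image generation works exactly as before; the output path below is just an example:

```py
# Generate and save one image with the fully distilled setup.
image = distilled(prompt, num_inference_steps=NUM_INFERENCE_STEPS, generator=generator).images[0]
image.save("golden_vase.png")  # example output path
```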
