# Spectrogram Diffusion

[Spectrogram Diffusion](https://huggingface.co/papers/2206.05408) is by Curtis Hawthorne, Ian Simon, Adam Roberts, Neil Zeghidour, Josh Gardner, Ethan Manilow, and Jesse Engel.

*An ideal music synthesizer should be both interactive and expressive, generating high-fidelity audio in realtime for arbitrary combinations of instruments and notes. Recent neural synthesizers have exhibited a tradeoff between domain-specific models that offer detailed control of only specific instruments, or raw waveform models that can train on any music but with minimal control and slow generation. In this work, we focus on a middle ground of neural synthesizers that can generate audio from MIDI sequences with arbitrary combinations of instruments in realtime. This enables training on a wide range of transcription datasets with a single model, which in turn offers note-level control of composition and instrumentation across a wide range of instruments. We use a simple two-stage process: MIDI to spectrograms with an encoder-decoder Transformer, then spectrograms to audio with a generative adversarial network (GAN) spectrogram inverter. We compare training the decoder as an autoregressive model and as a Denoising Diffusion Probabilistic Model (DDPM) and find that the DDPM approach is superior both qualitatively and as measured by audio reconstruction and Fréchet distance metrics. Given the interactivity and generality of this approach, we find this to be a promising first step towards interactive and expressive neural synthesis for arbitrary combinations of instruments and notes.*

The original codebase can be found at [magenta/music-spectrogram-diffusion](https://github.com/magenta/music-spectrogram-diffusion).

![img](https://storage.googleapis.com/music-synthesis-with-spectrogram-diffusion/architecture.png)

As depicted above, the model takes a MIDI file as input and tokenizes it into a sequence of 5-second intervals. Each tokenized interval, together with positional encodings, is passed through the Note Encoder, and its representation is concatenated with the previous window's generated spectrogram representation, obtained via the Context Encoder; for the initial 5-second window this context is set to zero. The resulting context is used as conditioning to sample the denoised spectrogram for the current MIDI window, and that spectrogram is both appended to the final output and reused as the context for the next MIDI window. The process repeats until all the MIDI inputs have been covered. Finally, a MelGAN decoder converts the potentially long spectrogram to audio, which is the final result of this pipeline.
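The windowed generation loop is easier to see in code. The sketch below is schematic only: the function stubs, array shapes, and window count are placeholder assumptions standing in for the pipeline's Note Encoder, Context Encoder, DDPM decoder, and MelGAN vocoder; it mirrors the data flow described above, not the library's actual internals.

```python
import numpy as np

# Stand-ins for the model components; shapes are illustrative placeholders.
def encode_notes(window_tokens):       # Note Encoder
    return np.zeros((128, 512))

def encode_context(prev_spectrogram):  # Context Encoder
    return np.zeros((256, 512))

def denoise_spectrogram(context):      # DDPM decoder: one 5-second mel window
    return np.zeros((256, 128))

def melgan_decode(spectrogram):        # MelGAN vocoder: spectrogram -> waveform
    return np.zeros(spectrogram.shape[0] * 320)

midi_windows = [list(range(10))] * 3   # tokenized 5-second MIDI windows (placeholder)
prev_window = np.zeros((256, 128))     # zero context for the first window
spectrogram_chunks = []

for window_tokens in midi_windows:
    # Concatenate the note representation with the previous window's context.
    context = np.concatenate(
        [encode_notes(window_tokens), encode_context(prev_window)], axis=0
    )
    prev_window = denoise_spectrogram(context)   # sample this window's spectrogram
    spectrogram_chunks.append(prev_window)       # append to the running output

full_spectrogram = np.concatenate(spectrogram_chunks, axis=0)
audio = melgan_decode(full_spectrogram)          # final waveform for the whole file
```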
<Tip>

Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## SpectrogramDiffusionPipeline

`class diffusers.SpectrogramDiffusionPipeline(*args, **kwargs)`

`__call__(*args, **kwargs)`: Call self as a function.
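In practice the whole flow above is wrapped by the pipeline's `__call__`. The snippet below is a minimal usage sketch; it assumes the `google/music-spectrogram-diffusion` checkpoint, the `MidiProcessor` helper for turning a MIDI file into token windows, an installed `note_seq` dependency, and a locally available MIDI file, none of which are stated on this page.

```python
from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

# Assumed checkpoint name; requires `note_seq` (plus torch/transformers) to be installed.
pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
pipe = pipe.to("cuda")

processor = MidiProcessor()  # tokenizes the MIDI file into 5-second note windows

# Placeholder path: any MIDI file can be substituted here.
output = pipe(processor("beethoven_hammerklavier_2.mid"))
audio = output.audios[0]  # synthesized waveform as a NumPy array
```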
## AudioPipelineOutput

`class diffusers.AudioPipelineOutput(audios: np.ndarray)`

Output class for audio pipelines.

**Parameters**

- **audios** (`np.ndarray`): Denoised audio samples, as a NumPy array of shape `(batch_size, num_channels, sample_rate)`.
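Continuing the usage sketch above, downstream code only needs to read the `audios` field of the returned object to save or play the result. The 16 kHz sample rate below is an assumption carried over from the original Magenta release rather than something stated on this page.

```python
import scipy.io.wavfile

# `output` is the AudioPipelineOutput returned by the pipeline call in the earlier sketch.
audio = output.audios[0].squeeze()  # drop any singleton channel axis -> 1-D waveform
# Assumed sample rate (16 kHz, per the original Magenta release); verify against your checkpoint.
scipy.io.wavfile.write("spectrogram_diffusion.wav", rate=16000, data=audio)
```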
