# Hugging Face Space status: Paused (scrape residue from the Spaces page header)
# UI framework and stdlib.
# NOTE(review): the code below also references torch, librosa, jukebox,
# MODELS and Audio, none of which are imported anywhere in this file —
# as written, the module raises NameError; confirm the intended imports.
import gradio as gr
import os
# Load the VQ-VAE model once, at import time, on CPU.
# NOTE(review): ``jukebox`` and ``MODELS`` are undefined here — presumably
# they come from the OpenAI Jukebox package; verify the actual API
# (``make_vqvae`` location and the ``MODELS`` registry) before running.
model = jukebox.make_vqvae(MODELS['5B_LYRICS'], device="cpu")
def generate_music(temperature=1.0, top_k=10, beam_width=5):
    """Sample a random latent vector and decode it to audio.

    Draws a standard-normal latent of shape (1, 1024) and asks the
    module-level ``model`` to sample audio from it.

    Parameters
    ----------
    temperature : float
        Sampling temperature forwarded to ``model.sample``.
    top_k : int
        Top-k cutoff forwarded to ``model.sample``.
    beam_width : int
        Beam width forwarded to ``model.sample``.

    Returns
    -------
    The audio object produced by ``model.sample``.
    """
    latent = torch.randn(1, 1024)
    generated = model.sample(
        latent,
        temperature=temperature,
        top_k=top_k,
        beam_width=beam_width,
    )
    return generated
def input_audio():
    """Prompt on stdin for an audio file path and load it with librosa.

    Returns whatever ``librosa.load`` produces.
    NOTE(review): ``librosa.load`` returns a ``(samples, sample_rate)``
    tuple — downstream callers should unpack it; verify against usage.
    """
    path = input("Enter the path to the audio file: ")
    return librosa.load(path)
def generate_music_from_audio(audio_data):
    """Round-trip audio through the model's latent space (encode, then decode).

    Parameters
    ----------
    audio_data
        Audio in whatever form the module-level ``model.encode`` accepts.

    Returns
    -------
    The reconstruction produced by ``model.decode``.
    """
    latents = model.encode(audio_data)
    return model.decode(latents)
# Save the music
def save_music(audio, filename):
    """Write *audio* to *filename* as a 44,100 Hz WAV file.

    Fixes the original ``librosa.output(filename, audio, sr=44100)``:
    ``librosa.output`` is a module, not a callable (so the call raised
    TypeError), and the whole ``librosa.output`` submodule was removed in
    librosa 0.8 — ``scipy.io.wavfile.write`` is a supported replacement.

    Parameters
    ----------
    audio : array-like
        Sample data; assumed to be a 1-D (mono) or 2-D (frames, channels)
        numeric array — TODO confirm what the model actually returns.
    filename : str
        Destination path for the WAV file.
    """
    from scipy.io import wavfile  # local import keeps module import light
    wavfile.write(filename, 44100, audio)
# Play the music
def play_music(audio):
    """Construct a playback widget for *audio*.

    NOTE(review): ``Audio`` is never imported in this file — presumably
    ``IPython.display.Audio``; as written this raises NameError. Even with
    the import, the object is created and immediately discarded, so nothing
    plays outside a notebook display context. Confirm intent.
    """
    Audio(audio)
# Build the Gradio interface.
# Fixed from the original: the ``gr.inputs``/``gr.outputs`` namespaces and
# the ``allow_screenshot``/``clear_output`` kwargs were removed in modern
# Gradio, and ``Slider`` takes ``minimum``/``maximum`` (``min``/``max`` was
# never a valid signature). Slider start values mirror generate_music's
# parameter defaults.
app = gr.Interface(
    fn=generate_music,
    inputs=[
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0,
                  label="Temperature"),
        gr.Slider(minimum=1, maximum=10, step=1, value=10, label="Top K"),
        gr.Slider(minimum=1, maximum=10, step=1, value=5, label="Beam Width"),
    ],
    outputs=gr.Audio(),
    title="OpenAI Jukebox",
    description="Generate music using OpenAI Jukebox",
)

# Run the app only when executed as a script, so importing this module
# (e.g. for testing) does not start a server. Hugging Face Spaces runs
# app files as scripts, so launch behavior there is unchanged.
if __name__ == "__main__":
    app.launch()