Curlyblaze committed on
Commit
b4e351f
·
verified ·
1 Parent(s): 05d441b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -19
app.py CHANGED
@@ -2,34 +2,63 @@ import gradio as gr
2
  from audiocraft.models import MusicGen
3
  from audiocraft.data.audio import audio_write
4
  import torch
 
5
 
6
- # Load the model - 'melody' is key for your use case
 
7
  model = MusicGen.get_pretrained('facebook/musicgen-melody')
8
 
9
  def finish_my_song(audio_input, text_description, duration):
10
- # Set the length of the generated idea
 
 
 
11
  model.set_generation_params(duration=duration)
12
 
13
- # Process your chords/bass + your text prompt
14
- # melody_wavs expects the audio you want the AI to "follow"
 
 
 
 
 
 
 
15
  wav = model.generate_with_chroma(
16
  descriptions=[text_description],
17
- melody_wavs=audio_input[1],
18
- sr=audio_input[0]
19
  )
20
 
21
- # Return the generated audio
22
- return wav[0].cpu().numpy()
 
 
 
 
23
 
24
- interface = gr.Interface(
25
- fn=finish_my_song,
26
- inputs=[
27
- gr.Audio(label="Upload your Chords & Bass"),
28
- gr.Textbox(label="Describe what to add (e.g., drums, lead synth, mood)"),
29
- gr.Slider(minimum=10, maximum=30, value=15, label="Seconds to generate")
30
- ],
31
- outputs=gr.Audio(label="Full Song Idea"),
32
- title="The Song Finisher"
33
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- interface.launch()
 
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write
import torch
import numpy as np

# Load the model (using 'melody' specifically for your chords/bass).
# This will download the first time you run the app.
# NOTE(review): 'facebook/musicgen-melody' weights are fetched from the Hub on
# first use — requires network access and several GB of disk; loading happens
# once at import time so every request reuses the same model instance.
model = MusicGen.get_pretrained('facebook/musicgen-melody')
def finish_my_song(audio_input, text_description, duration):
    """Generate an AI continuation of an uploaded chords/bass idea.

    Args:
        audio_input: Gradio numpy audio — a ``(sample_rate, data)`` tuple, or
            ``None`` when nothing was uploaded. ``data`` is 1-D ``(T,)`` for
            mono or 2-D ``(T, C)`` for multichannel, usually int16 PCM.
        text_description: Free-text prompt describing what to add.
        duration: Seconds of audio to generate.

    Returns:
        Path to the rendered WAV file, or ``None`` when no audio was given
        (the single gr.Audio output simply stays empty).
    """
    if audio_input is None:
        # The UI wires exactly one output component, so return a single value.
        # (Returning a (None, message) tuple here would be mis-unpacked by
        # gr.Audio as a (sample_rate, data) pair.)
        return None

    # Set how long the AI should play for.
    model.set_generation_params(duration=duration)

    # Unpack the sampling rate and raw samples from the upload.
    sr, data = audio_input

    audio_tensor = torch.from_numpy(data).float()
    # Gradio delivers integer PCM (typically int16); MusicGen expects floats
    # in [-1, 1], so rescale by the dtype's max value.
    if np.issubdtype(data.dtype, np.integer):
        audio_tensor = audio_tensor / np.iinfo(data.dtype).max

    # Normalize the layout to (channels, time). For 1-D mono input, .t() is a
    # no-op, so add the channel axis explicitly instead.
    if audio_tensor.dim() == 1:
        audio_tensor = audio_tensor.unsqueeze(0)        # (T,)   -> (1, T)
    else:
        audio_tensor = audio_tensor.t()                 # (T, C) -> (C, T)

    # Downmix multichannel to mono along the channel axis.
    if audio_tensor.shape[0] > 1:
        audio_tensor = audio_tensor.mean(dim=0, keepdim=True)

    # Add the batch axis the model expects: (1, 1, T).
    audio_tensor = audio_tensor.unsqueeze(0)

    # Generate the song based on your description + your audio.
    wav = model.generate_with_chroma(
        descriptions=[text_description],
        melody_wavs=audio_tensor,
        sr=sr,
    )

    # Save to a file so Gradio shows a player and a download button.
    output_path = "ai_completion_idea"
    audio_write(output_path, wav[0].cpu(), model.sample_rate, strategy="loudness")

    return f"{output_path}.wav"
39
 
40
# Assemble the soft-themed Gradio front end and start serving it.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎵 The Song Finisher AI")
    gr.Markdown("Upload your **Chords and Bass**, describe the vibe, and let AI build the rest.")

    with gr.Row():
        # Left column: all user inputs, in order of use.
        with gr.Column():
            source_audio = gr.Audio(label="Step 1: Upload Chords/Bass (WAV or MP3)")
            vibe_prompt = gr.Textbox(
                label="Step 2: Describe the Vibe",
                placeholder="e.g., Add heavy trap drums, a wide synth lead, and a dark atmosphere...",
            )
            gen_seconds = gr.Slider(
                minimum=5,
                maximum=30,
                value=15,
                step=5,
                label="Seconds to Generate",
            )
            finish_button = gr.Button("Finish My Song", variant="primary")

        # Right column: the rendered result as a playable, downloadable file.
        with gr.Column():
            result_audio = gr.Audio(label="Step 3: Listen & Download", type="filepath")

    finish_button.click(
        fn=finish_my_song,
        inputs=[source_audio, vibe_prompt, gen_seconds],
        outputs=[result_audio],
    )

demo.launch()