bdc-divya committed on
Commit
6239fd2
·
1 Parent(s): 8b1f069

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -23
app.py CHANGED
@@ -1,33 +1,38 @@
1
- # Import necessary libraries
2
- from audiocraft.models import MusicGen # Import the MusicGen model for music generation
3
- from audiocraft.data.audio import audio_write # Import audio_write function for saving audio
4
- import gradio as gr # Import Gradio for creating a web interface
5
- import torch # Import PyTorch for deep learning operations
 
 
 
 
6
 
7
- # Load the pretrained MusicGen model (small)
8
  model = MusicGen.get_pretrained("facebook/musicgen-small")
 
9
 
10
- # Set generation parameters for the model (e.g., duration of 8 seconds)
11
- model.set_generation_params(duration=10)
12
-
13
- # Function to generate music from text descriptions
14
  def generate_music(description):
15
- # Generate music based on the provided description
16
  wav = model.generate([description])
17
-
18
- # Convert the audio waveform to a format that Gradio can display
19
- audio_data = wav[0].cpu().numpy().tobytes()
20
-
21
- return audio_data # Return the audio data
 
 
 
 
22
 
23
- # Define a Gradio interface
24
  iface = gr.Interface(
25
- fn=generate_music, # Use the generate_music function for processing input
26
- inputs="text", # Accept text input from the user
27
- outputs=gr.Audio(type="numpy", label="Audio"), # Display the generated audio as output
28
- title="Music Generation from Descriptions", # Set the title of the web interface
29
- description="Generate music based on descriptions.", # Provide a description
30
- live=False # Set to False if you don't want real-time updates (for beginner-friendly interaction)
31
  )
32
 
33
  iface.launch(debug=True)
 
1
+ import tempfile
2
+ from audiocraft.models import MusicGen
3
+ from audiocraft.data.audio import audio_write
4
+ import gradio as gr
5
+ import torch
6
+ import tempfile
7
+ import uuid
8
+ import os
9
+ from scipy.io.wavfile import write
10
 
11
# Load the small pretrained MusicGen checkpoint once at import time so
# every web request reuses the same in-memory weights.
model = MusicGen.get_pretrained("facebook/musicgen-small")
# Cap each generated clip at 8 seconds of audio.
model.set_generation_params(duration=8)
14
 
 
 
 
 
15
def generate_music(description):
    """Generate a short audio clip from a text description.

    Runs the module-level MusicGen model on the prompt, writes the result
    to a uniquely named WAV file in the system temp directory, and returns
    that path (the shape Gradio's ``type="filepath"`` output expects).

    Args:
        description: Free-form text prompt describing the desired audio.

    Returns:
        Absolute path of the generated ``.wav`` file.
    """
    # model.generate returns a (batch, channels, samples) tensor; we sent
    # a single prompt, so take batch element 0.
    wav = model.generate([description])
    audio_array = wav[0].cpu().numpy()  # (channels, samples)

    # scipy's WAV writer expects (samples,) for mono or (samples, channels)
    # for multichannel audio, so drop or transpose the channel axis.
    # (The old .squeeze() only handled the mono case correctly.)
    if audio_array.shape[0] == 1:
        audio_array = audio_array[0]
    else:
        audio_array = audio_array.T

    # uuid4 is purely random: unlike uuid1 it does not leak the host MAC
    # address and timestamp into a world-readable temp-file name, and
    # collisions are practically impossible.
    file_path = os.path.join(tempfile.gettempdir(), f'{uuid.uuid4()}.wav')
    write(file_path, rate=model.sample_rate, data=audio_array)
    return file_path
27
 
28
# Wire the generator into a simple web UI: one text box in, one playable
# audio file out. live=False means generation runs only on Submit, not on
# every keystroke.
audio_output = gr.components.Audio(type="filepath", label="Audio")
iface = gr.Interface(
    fn=generate_music,
    inputs="text",
    outputs=audio_output,
    title="Text to Audio Generation",
    description="Generate audio based on text descriptions.",
    live=False,
)

# debug=True surfaces server-side tracebacks in the console while testing.
iface.launch(debug=True)