bdc-divya committed on
Commit
779c45c
·
1 Parent(s): 59124d7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -0
app.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -- Dependencies ------------------------------------------------------------
from audiocraft.models import MusicGen          # text-to-music generation model
from audiocraft.data.audio import audio_write   # loudness-aware audio file writer
import gradio as gr                             # web UI framework
import torch                                    # tensor backend used by MusicGen

# -- Model setup (runs once, at import time) ---------------------------------
# Load the pretrained "large" MusicGen checkpoint and fix the length of
# every generated clip at 8 seconds.
model = MusicGen.get_pretrained("large")
model.set_generation_params(duration=8)
12
+
13
def generate_music(description):
    """Generate an 8-second music clip from a text description.

    Parameters
    ----------
    description : str
        Natural-language prompt describing the desired music.

    Returns
    -------
    str
        Status message displayed in the Gradio output box.
    """
    # model.generate takes a batch of prompts; pass a single-element list
    # and keep the first (and only) generated waveform.
    wav = model.generate([description])

    # BUG FIX: audiocraft's audio_write expects a *stem* name and appends
    # the extension itself, so passing "output.wav" would actually create
    # "output.wav.wav". Passing "output" saves the file as "output.wav",
    # matching the status message below.
    # strategy="loudness" normalizes the clip to -14 dB LUFS.
    audio_write("output", wav[0].cpu(), model.sample_rate, strategy="loudness")

    return "Audio generated and saved as 'output.wav'"
22
+
23
# -- Web UI ------------------------------------------------------------------
# Minimal Gradio front end: a single text box in, a text status message out.
iface = gr.Interface(
    fn=generate_music,                              # handler invoked on submit
    inputs="text",                                  # free-text prompt field
    outputs="text",                                 # status message display
    title="Music Generation from Descriptions",
    description="Generate music based on descriptions.",
    live=False,  # run only on explicit submit, not on every keystroke
)

# Start the web server; debug=True surfaces tracebacks in the console.
iface.launch(debug=True)