Master0fNone committed on
Commit
9a6673f
·
verified ·
1 Parent(s): 4c3dee6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -11
app.py CHANGED
@@ -1,27 +1,24 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- import torch
4
  import soundfile as sf
 
 
5
 
6
- # Load your pre-trained text-to-music model (example: using a music generation model)
7
# transformers has no "text-to-music" pipeline task; MusicGen is served by the
# "text-to-audio" task, and an unknown task name raises KeyError at startup.
model = pipeline("text-to-audio", model="facebook/musicgen-small")
8
 
9
- # Function to process text and generate music
10
def generate_music(text_input):
    """Generate audio from a text prompt and return the path to a WAV file.

    Args:
        text_input: Free-form text description of the desired music.

    Returns:
        str: Path to a temporary .wav file containing the generated audio
        (a filepath is a return type ``gr.Audio`` accepts directly).
    """
    import tempfile

    output = model(text_input)
    # transformers audio pipelines return {"audio": ndarray, "sampling_rate": int};
    # tolerate a bare array as well — TODO confirm against the installed version.
    if isinstance(output, dict):
        audio = output.get("audio")
        rate = int(output.get("sampling_rate", 22050))
    else:
        audio, rate = output, 22050
    # Write to a real temporary file: the original hard-coded placeholder
    # "/path/to/output.wav" does not exist on any real deployment and crashes.
    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    sf.write(tmp.name, audio, rate)
    return tmp.name
18
 
19
- # Set up the Gradio interface
20
# Minimal text-in / audio-out UI wrapping the generator function.
iface = gr.Interface(
    fn=generate_music,
    inputs=gr.Textbox(label="Enter Text for Music"),
    outputs=gr.Audio(label="Generated Music"),
)
25
 
26
- # Launch the app
27
# Start the Gradio web server; this call blocks until the app is stopped.
iface.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
 
3
  import soundfile as sf
4
+ import io
5
+ import numpy as np
6
 
 
7
# transformers has no "text-to-music" pipeline task; MusicGen is served by the
# "text-to-audio" task, and an unknown task name raises KeyError at startup.
model = pipeline("text-to-audio", model="facebook/musicgen-small")
8
 
 
9
def generate_music(text_input):
    """Generate audio from a text prompt.

    Args:
        text_input: Free-form text description of the desired music.

    Returns:
        tuple[int, numpy.ndarray]: ``(sample_rate, samples)`` — the tuple form
        ``gr.Audio`` accepts directly. The previous implementation returned a
        raw ``io.BytesIO``, which ``gr.Audio`` does not understand (it takes a
        filepath or a ``(rate, ndarray)`` pair), so the UI showed no audio.
    """
    output = model(text_input)
    # transformers audio pipelines return {"audio": ndarray, "sampling_rate": int};
    # tolerate a bare array as well — TODO confirm against the installed version.
    if isinstance(output, dict):
        audio = output.get("audio")
        rate = int(output.get("sampling_rate", 22050))
    else:
        audio, rate = output, 22050
    # Collapse any leading batch/channel-of-1 axes so Gradio gets (frames,)
    # or (frames, channels).
    return rate, np.squeeze(np.asarray(audio))
 
 
 
17
 
 
18
# Minimal text-in / audio-out UI wrapping the generator function.
iface = gr.Interface(
    fn=generate_music,
    inputs=gr.Textbox(label="Enter Text for Music"),
    outputs=gr.Audio(label="Generated Music"),
)
23
 
 
24
# Start the Gradio web server; this call blocks until the app is stopped.
iface.launch()