Math12393 committed on
Commit
f6b5965
·
verified ·
1 Parent(s): 82f6aaa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -27
app.py CHANGED
@@ -1,55 +1,63 @@
1
  import gradio as gr
2
- from audiocraft.models import MusicGen
3
  import torch
4
  import numpy as np
5
 
6
- # 1. Load the model (Small is safest for 16GB RAM)
7
- model = MusicGen.get_pretrained('facebook/musicgen-small')
 
8
 
9
- # 2. Define the CSS to hide footer, API, and settings
10
  custom_css = """
11
  footer {display: none !important;}
12
  .show-api {display: none !important;}
13
  button.reference {display: none !important;}
14
- #component-0 {margin-bottom: 24px;}
15
  """
16
 
17
- def generate_music(description, duration):
18
- if not description:
19
  return None
20
-
21
- model.set_generation_params(duration=duration)
22
- wav = model.generate([description])
23
 
24
- # Process for Gradio output
25
- wav = wav.cpu().numpy().squeeze()
26
- sampling_rate = model.sample_rate
27
- return (sampling_rate, wav)
 
 
 
 
 
 
 
 
 
 
28
 
29
- # 3. Build the Interface
30
- with gr.Blocks(css=custom_css) as demo:
31
- gr.Markdown("# 🎵 Tom-The-Ai-Music-AI")
32
- gr.Markdown("Describe your vibe and Tom will compose it. *Note: CPU generation takes about 60-90 seconds.*")
33
 
34
  with gr.Row():
35
  with gr.Column():
36
- prompt = gr.Textbox(
37
- label="What should Tom play?",
38
- placeholder="e.g., Synthwave with heavy bass and 80s drums",
39
  lines=3
40
  )
41
- duration = gr.Slider(minimum=2, maximum=15, value=8, step=1, label="Seconds")
42
- btn = gr.Button("Generate Music", variant="primary")
43
-
 
 
44
  with gr.Column():
45
  output_audio = gr.Audio(label="Tom's Masterpiece")
46
 
47
- btn.click(fn=generate_music, inputs=[prompt, duration], outputs=output_audio)
48
 
49
- # 4. Launch with the clean settings
50
  if __name__ == "__main__":
51
  demo.launch(
52
  server_name="0.0.0.0",
53
  server_port=7860,
54
- show_api=False # Extra layer to hide API link
55
  )
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
  import torch
4
  import numpy as np
5
 
6
# MusicGen-Small via the transformers text-to-audio pipeline.
# device="cpu": free Hugging Face Spaces provide no GPU.
synthesiser = pipeline("text-to-audio", "facebook/musicgen-small", device="cpu")

# CSS tweaks that hide Gradio's footer, the "Use via API" link,
# and the reference button for a cleaner page.
custom_css = """
footer {display: none !important;}
.show-api {display: none !important;}
button.reference {display: none !important;}
"""
16
 
17
def tom_make_music(prompt, duration):
    """Generate a short music clip from a text description.

    Args:
        prompt: Free-text description of the music to generate.
        duration: Target clip length in seconds.

    Returns:
        A ``(sampling_rate, audio_array)`` tuple suitable for ``gr.Audio``,
        or ``None`` when the prompt is empty or whitespace-only.
    """
    # Guard against empty AND whitespace-only prompts; the original
    # falsiness check let "   " fall through to the model.
    if not prompt or not prompt.strip():
        return None

    # MusicGen emits roughly 50 audio tokens per second; clamp to at
    # least one token so tiny durations can't request zero tokens.
    max_tokens = max(1, int(duration * 50))

    # Generate the audio on the already-loaded pipeline.
    output = synthesiser(
        prompt,
        forward_params={
            "do_sample": True,
            "max_new_tokens": max_tokens,
        },
    )

    # The pipeline returns {"audio": ndarray, "sampling_rate": int};
    # squeeze() drops the leading batch/channel axis for gr.Audio.
    return (output["sampling_rate"], output["audio"].squeeze())
35
 
36
# Assemble the Gradio UI: prompt + duration on the left, audio output
# on the right, wired together by the generate button.
with gr.Blocks(css=custom_css, title="Tom The Music AI") as demo:
    gr.Markdown("# 🎹 Tom-The-Ai-Music-AI")
    gr.Markdown("I'm Tom. Describe a song and I'll generate it for you.")

    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="What should I play?",
                placeholder="e.g., Chill lo-fi beats for studying",
                lines=3,
            )
            length_slider = gr.Slider(
                minimum=2, maximum=15, value=8, step=1, label="Seconds"
            )
            generate_btn = gr.Button("Generate with Tom", variant="primary")

        with gr.Column():
            output_audio = gr.Audio(label="Tom's Masterpiece")

    generate_btn.click(
        fn=tom_make_music,
        inputs=[prompt_box, length_slider],
        outputs=output_audio,
    )
57
 
 
58
# Script entry point: bind all interfaces on the Spaces default port
# and keep the auto-generated API page hidden.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)