matthewbarberdev committed on
Commit
8c1332e
·
verified ·
1 Parent(s): 7532e43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -71
app.py CHANGED
@@ -7,16 +7,6 @@ import pretty_midi
7
  import subprocess
8
  import os
9
  from openai import OpenAI
10
- # Audio support flag
11
- try:
12
- import pygame
13
- pygame.mixer.init()
14
- AUDIO_AVAILABLE = True
15
- except Exception as e:
16
- print(f"[WARN] pygame audio init failed: {e}")
17
- AUDIO_AVAILABLE = False
18
-
19
- midi_path_global = None
20
 
21
  # === LLM APIs ===
22
  def query_llm(prompt, model_name=None):
@@ -25,22 +15,24 @@ def query_llm(prompt, model_name=None):
25
  response = requests.post("http://localhost:11434/api/generate", json={"model": model_name, "prompt": prompt, "stream": False})
26
  return response.json().get("response", "")
27
  else:
28
- # Replace or load from environment
29
  client = OpenAI(
30
- base_url="https://api.studio.nebius.com/v1/",
31
- api_key=os.environ.get("NEBIUS_API_KEY")
32
- )
33
  response = client.chat.completions.create(
34
- model="Qwen/Qwen3-30B-A3B",
35
- max_tokens=512,
36
- temperature=0.6,
37
- top_p=0.9,
38
- extra_body={
39
- "top_k": 50
40
- },
41
- messages=[]
42
- )
43
- return response["choices"][0]["message"]["content"]
 
 
 
44
 
45
  # === Step 1: Parse intent ===
46
  def get_intent_from_prompt(prompt, model_name):
@@ -117,46 +109,17 @@ def midi_from_plan(melody, tempo):
117
  midi.instruments.append(instrument)
118
  return midi
119
 
 
120
  def generate_midi_from_prompt(prompt, model_name):
121
- global midi_path_global
122
  intent = get_intent_from_prompt(prompt, model_name)
123
  melody = get_melody_from_intent(intent, model_name)
124
  midi = midi_from_plan(melody, intent.get("tempo", 120))
125
 
126
  with tempfile.NamedTemporaryFile(delete=False, suffix=".mid") as tmp:
127
  midi.write(tmp.name)
128
- midi_path_global = tmp.name
129
- return midi_path_global
130
-
131
- def play_audio():
132
- if not AUDIO_AVAILABLE:
133
- return "Audio playback not available."
134
- if midi_path_global is None:
135
- return "No MIDI file loaded yet."
136
- try:
137
- pygame.mixer.music.load(midi_path_global)
138
- pygame.mixer.music.play()
139
- return "Playing audio..."
140
- except Exception as e:
141
- return f"Error playing audio: {e}"
142
-
143
- def pause_audio():
144
- if not AUDIO_AVAILABLE:
145
- return "Audio playback not available."
146
- pygame.mixer.music.pause()
147
- return "Paused."
148
-
149
- def unpause_audio():
150
- if not AUDIO_AVAILABLE:
151
- return "Audio playback not available."
152
- pygame.mixer.music.unpause()
153
- return "Unpaused."
154
 
155
- def stop_audio():
156
- if not AUDIO_AVAILABLE:
157
- return "Audio playback not available."
158
- pygame.mixer.music.stop()
159
- return "Stopped."
160
 
161
  # === Get Ollama models ===
162
  def get_ollama_models():
@@ -169,7 +132,6 @@ def get_ollama_models():
169
 
170
  # === Gradio UI ===
171
  models = get_ollama_models()
172
-
173
  demo = gr.Interface(
174
  fn=generate_midi_from_prompt,
175
  inputs=[
@@ -180,19 +142,7 @@ demo = gr.Interface(
180
  gr.File(label="🎵 Download MIDI File")
181
  ],
182
  title="🎼 Music Command Prompt (MCP Agent)",
183
- description="Describe your music idea and download a generated MIDI file. Choose from local or OpenAI LLMs."
184
  )
185
 
186
- with gr.Row():
187
- with gr.Column(scale=1):
188
- play_btn = gr.Button("▶ Play")
189
- pause_btn = gr.Button("⏸ Pause")
190
- unpause_btn = gr.Button("▶ Resume")
191
- stop_btn = gr.Button("⏹ Stop")
192
- with gr.Column(scale=3):
193
- status = gr.Textbox(label="Audio Status", interactive=False)
194
-
195
- play_btn.click(play_audio, None, status)
196
- pause_btn.click(pause_audio, None, status)
197
- unpause_btn.click(unpause_audio, None, status)
198
- stop_btn.click(stop_audio, None, status)
 
7
  import subprocess
8
  import os
9
  from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
10
 
11
  # === LLM APIs ===
12
  def query_llm(prompt, model_name=None):
 
15
  response = requests.post("http://localhost:11434/api/generate", json={"model": model_name, "prompt": prompt, "stream": False})
16
  return response.json().get("response", "")
17
  else:
 
18
  client = OpenAI(
19
+ base_url="https://api.studio.nebius.com/v1/",
20
+ api_key=os.environ.get("NEBIUS_API_KEY")
21
+ )
22
  response = client.chat.completions.create(
23
+ model="Qwen/Qwen3-30B-A3B",
24
+ messages=[
25
+ {
26
+ "role": "system",
27
+ "content": "You are a helpful assistant."
28
+ },
29
+ {
30
+ "role": "user",
31
+ "content": prompt
32
+ }
33
+ ]
34
+ )
35
+ return response.choices[0].message.content
36
 
37
  # === Step 1: Parse intent ===
38
  def get_intent_from_prompt(prompt, model_name):
 
109
  midi.instruments.append(instrument)
110
  return midi
111
 
112
+ # === Gradio function ===
113
  def generate_midi_from_prompt(prompt, model_name):
 
114
  intent = get_intent_from_prompt(prompt, model_name)
115
  melody = get_melody_from_intent(intent, model_name)
116
  midi = midi_from_plan(melody, intent.get("tempo", 120))
117
 
118
  with tempfile.NamedTemporaryFile(delete=False, suffix=".mid") as tmp:
119
  midi.write(tmp.name)
120
+ midi_path = tmp.name
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
 
122
+ return midi_path
 
 
 
 
123
 
124
  # === Get Ollama models ===
125
  def get_ollama_models():
 
132
 
133
  # === Gradio UI ===
134
  models = get_ollama_models()
 
135
  demo = gr.Interface(
136
  fn=generate_midi_from_prompt,
137
  inputs=[
 
142
  gr.File(label="🎵 Download MIDI File")
143
  ],
144
  title="🎼 Music Command Prompt (MCP Agent)",
145
+ description="Describe your music idea and download a generated MIDI file. Choose from local or Nebius/OpenAI LLMs."
146
  )
147
 
148
+ demo.launch(mcp_server=True)