# app.py — extend a MIDI file with an OpenAI completion.
# Provenance: amongusrickroll68's Hugging Face Space, "Update app.py", commit 6b83d53.
# Requires: pip install openai mido pretty_midi
# (The original used IPython's "!pip install ..." magic, which is a syntax
# error in a plain .py file — dependencies belong in requirements.txt.)
import os

import openai
import mido
import pretty_midi

# Read the API key from the environment instead of hard-coding it.
# SECURITY: the original committed a live-looking "sk-..." secret key in
# source; that key must be revoked and never stored in the repository.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
# Load the MIDI file to extend — once with mido (for writing tracks back)
# and once with pretty_midi (for tempo/key analysis).
midi_file = 'example.mid'
midi_data = mido.MidiFile(midi_file)
midi_pretty = pretty_midi.PrettyMIDI(midi_file)

# Describe the file for the generation prompt.
# get_tempo_changes() returns (change_times, tempi); index [1] is the tempo
# array — the original read [0][0], i.e. the *time* of the first change.
tempo = midi_pretty.get_tempo_changes()[1][0]

# A MIDI file may carry no key signature at all; fall back to C major
# (key number 0) instead of raising IndexError on an empty list.
key_changes = midi_pretty.key_signature_changes
key_number = key_changes[0].key_number if key_changes else 0

# pretty_midi exposes key_number_to_key_name(); there is no key_name().
key_name = pretty_midi.key_number_to_key_name(key_number)
prompt = f"extend this {tempo:.0f}bpm {key_name} MIDI file"
# Settings for the Musenet generation request.
# NOTE(review): "openai/musenet" is not a published engine of the public
# OpenAI Completion API — confirm this identifier against the deployment.
model = "openai/musenet"
length = 32  # Length of the generated audio in bars
temperature = 0.7  # Controls the randomness of the generation

# Collect the request arguments in one place, then issue the call.
generation_params = {
    "engine": model,
    "prompt": prompt,
    "max_tokens": 1024,
    "n": 1,
    "stop": None,
    "temperature": temperature,
    "timeout": 60,
}
response = openai.Completion.create(**generation_params)
def _notes_to_track(note_tokens, ticks_per_beat=480):
    """Convert 'pitch,start,duration,velocity' tokens into a mido.MidiTrack.

    start/duration are interpreted in beats and scaled to ticks; velocity is
    a 0.0-1.0 float scaled to MIDI's 0-127 range.  mido messages carry
    *delta* times, so events are sorted by absolute tick and converted to
    deltas — the original wrote absolute times into every message.
    """
    events = []  # (absolute_tick, message_type, pitch, velocity)
    for token in note_tokens:
        pitch_s, start_s, duration_s, velocity_s = token.split(',')
        pitch = int(pitch_s)
        start_tick = int(float(start_s) * ticks_per_beat)
        end_tick = start_tick + int(float(duration_s) * ticks_per_beat)
        velocity = int(float(velocity_s) * 127)
        events.append((start_tick, 'note_on', pitch, velocity))
        events.append((end_tick, 'note_off', pitch, velocity))
    events.sort(key=lambda event: event[0])

    track = mido.MidiTrack()
    previous_tick = 0
    for abs_tick, msg_type, pitch, velocity in events:
        track.append(mido.Message(msg_type, note=pitch, velocity=velocity,
                                  time=abs_tick - previous_tick))
        previous_tick = abs_tick
    return track


# Append the generated notes as a new track.
# mido.MidiFile has no .extend(), and add_track() takes a track *name*, not
# a list of messages — new tracks are appended to the .tracks list.  Use the
# file's own ticks_per_beat rather than a hard-coded 480.
generated_tokens = response.choices[0].text.strip().split()
midi_data.tracks.append(
    _notes_to_track(generated_tokens, midi_data.ticks_per_beat))

# Save the extended MIDI file.
midi_data.save('extended.mid')