YAML Metadata Warning: empty or missing YAML metadata in the repo card
(see https://huggingface.co/docs/hub/model-cards#model-card-metadata)
https://github.com/xingjianll/symbolic-music-generation/
from miditok import PerTok, TokenizerConfig
from transformers import AutoModelForCausalLM, GenerationConfig
import torch
import io
import mido
import rtmidi
# Build a tokenizer configuration mirroring the settings the checkpoint was
# trained with (velocities/rests/tempos on; chords, programs, time
# signatures, pitch bends and microtiming off).
config = TokenizerConfig(
    num_velocities=8,
    use_velocities=True,
    use_chords=False,
    use_rests=True,
    use_tempos=True,
    use_time_signatures=False,
    use_sustain_pedals=False,
    use_pitch_bends=False,
    use_pitch_intervals=False,
    use_programs=False,
    use_pitchdrum_tokens=False,
    ticks_per_quarter=320,
    use_microtiming=False,
    max_microtiming_shift=0.125,
)
tokenizer = PerTok(config)
# BUG FIX: `from_pretrained` is a classmethod (huggingface_hub ModelHubMixin)
# that RETURNS a new tokenizer; the original code called it on the instance
# and discarded the result, so the pretrained vocabulary was never loaded.
# Rebind the name to the returned tokenizer.
tokenizer = PerTok.from_pretrained("xingjianll/midi-tokenizer")
# Fetch the causal-LM checkpoint and its matching generation settings from
# the Hub, then switch the network to inference mode (disables dropout etc.).
_REPO_ID = "xingjianll/midi-gpt2"
model = AutoModelForCausalLM.from_pretrained(_REPO_ID)
gen_config = GenerationConfig.from_pretrained(_REPO_ID)
model.eval()
# Sample a token sequence from the model, seeded with the BOS token.
# FIX: in the original paste the statements under `with torch.no_grad():`
# had lost their indentation (a SyntaxError as written); structure restored.
with torch.no_grad():
    # Single-item batch containing only the beginning-of-sequence token id.
    input_ids = torch.tensor([[tokenizer["BOS_None"]]], dtype=torch.long)
    output = model.generate(
        input_ids=input_ids,
        generation_config=gen_config,
    )

# Decode the generated ids back into MIDI bytes, held in an in-memory file
# so it can be handed straight to mido below without touching disk.
generated_ids = output[0].tolist()
midi_bytes = tokenizer.decode([generated_ids]).dumps_midi()
midi_file = io.BytesIO(midi_bytes)

# Optionally save to disk:
# with open("output.mid", "wb") as f:
#     f.write(midi_bytes)
# Play the generated MIDI through GarageBand's virtual input port.
# rtmidi is used only to enumerate the available ports for diagnostics;
# mido performs the actual sending.
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
print("Available MIDI ports:", available_ports)

# FIX: the loop body had lost its indentation (SyntaxError as pasted), and
# the output port was never closed — mido ports are context managers, so a
# `with` block guarantees the port is released even if sending fails.
# NOTE(review): the port name must match what GarageBand exposes — check
# the printed list above if opening fails.
with mido.open_output('GarageBand Virtual In') as port:
    midi = mido.MidiFile(file=midi_file)
    # MidiFile.play() yields messages paced in real time.
    for msg in midi.play():
        print(msg)
        port.send(msg)
Downloads last month: 15
Inference Providers
NEW
This model isn't deployed by any Inference Provider.
Ask for provider support