#==================================================================================
# https://huggingface.co/spaces/projectlosangeles/Orpheus-Music-Segmentator
#==================================================================================

print('=' * 70)
print('Orpheus Music Segmentator Gradio App')
print('=' * 70)
print('Loading core Orpheus Music Segmentator modules...')

import os
import copy
import time as reqtime
import datetime
from pytz import timezone

print('=' * 70)
print('Loading main Orpheus Music Segmentator modules...')

os.environ['USE_FLASH_ATTENTION'] = '1'

import torch

torch.set_float32_matmul_precision('high')
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
torch.backends.cuda.enable_flash_sdp(True)

from huggingface_hub import hf_hub_download

import TMIDIX
from midi_to_colab_audio import midi_to_colab_audio
from x_transformer_2_3_1 import *

import random
import tqdm

print('=' * 70)
print('Loading aux Orpheus Music Segmentator modules...')

import matplotlib.pyplot as plt
import gradio as gr
import spaces

print('=' * 70)
print('PyTorch version:', torch.__version__)
print('=' * 70)
print('Done!')
print('Enjoy! :)')
print('=' * 70)
#==================================================================================

MODEL_CHECKPOINT = 'Orpheus_Music_Transformer_LRNO_Segments_Fine_Tuned_Model_1986_steps_0.5946_loss_0.8384_acc.pth'
SOUNDFONT_PATH = 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2'

#==================================================================================

print('=' * 70)
print('Instantiating model...')

device_type = 'cuda'
dtype = 'bfloat16'
ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)

SEQ_LEN = 1668
PAD_IDX = 18819
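
# Token vocabulary layout, inferred from the encode/decode logic further down
# in this script (the role names are assumptions, not official documentation):
#   0..255       -> delta start-times, in 16 ms steps
#   256..16767   -> patch/pitch tokens: 256 + (128 * patch) + pitch
#   16768..18815 -> duration/velocity tokens: 16768 + (8 * duration) + velocity
#   18816        -> segment-start marker used to prime the model
#   18818        -> segment-boundary token sampled by the model
#   18819        -> padding index (PAD_IDX); 18817 does not appear in this script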
model = TransformerWrapper(num_tokens = PAD_IDX+1,
                           max_seq_len = SEQ_LEN,
                           attn_layers = Decoder(dim = 2048,
                                                 depth = 8,
                                                 heads = 32,
                                                 rotary_pos_emb = True,
                                                 attn_flash = True
                                                 )
                           )

model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)

print('=' * 70)
print('Loading model checkpoint...')

model_checkpoint = hf_hub_download(repo_id='asigalov61/Orpheus-Music-Transformer', filename=MODEL_CHECKPOINT)

model.load_state_dict(torch.load(model_checkpoint, map_location=device_type, weights_only=True))

model = torch.compile(model, mode='max-autotune')

model.to(device_type)
model.eval()
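
# Note: torch.compile defers compilation until the first forward pass, and
# mode='max-autotune' spends extra time searching for fast kernels, so the
# first segmentation request will be noticeably slower than subsequent ones.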
print('=' * 70)
print('Done!')
print('=' * 70)
print('Model will use', dtype, 'precision...')
print('=' * 70)

#==================================================================================
def load_midi(input_midi, add_monophonic_melody=False):

    raw_score = TMIDIX.midi2single_track_ms_score(input_midi)

    escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True, apply_sustain=True)

    if escore_notes and escore_notes[0]:

        escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes[0], sort_drums_last=True)

        if add_monophonic_melody:
            escore_notes = TMIDIX.add_expressive_melody_to_enhanced_score_notes(escore_notes)

        dscore = TMIDIX.delta_score_notes(escore_notes)

        dcscore = TMIDIX.chordify_score([d[1:] for d in dscore])

        chords = []

        #=======================================================
        # MAIN PROCESSING CYCLE
        #=======================================================

        for i, c in enumerate(dcscore):

            delta_time = c[0][0]

            cho = []
            cho.append(delta_time)

            for e in c:

                #=======================================================
                # Durations
                dur = max(1, min(255, e[1]))

                # Patches
                pat = max(0, min(128, e[5]))

                # Pitches
                ptc = max(1, min(127, e[3]))

                # Velocities
                # Calculating octo-velocity
                vel = max(8, min(127, e[4]))
                velocity = round(vel / 15)-1

                #=======================================================
                # FINAL NOTE SEQ
                #=======================================================

                # Writing final note
                pat_ptc = (128 * pat) + ptc
                dur_vel = (8 * dur) + velocity

                cho.extend([pat_ptc+256, dur_vel+16768]) # 18816
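
                # Worked example (illustrative values): a piano note (patch 0)
                # at pitch 60 with duration 32 (i.e. 512 ms) and velocity 100:
                #   velocity = round(100 / 15) - 1 = 6   (one of 8 "octo-velocity" buckets)
                #   pat_ptc  = 128 * 0 + 60 = 60   -> token 60 + 256    = 316
                #   dur_vel  = 8 * 32 + 6   = 262  -> token 262 + 16768 = 17030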
            chords.append(cho)

        print('Done!')
        print('=' * 70)
        print('Score has', len(chords), 'chords')
        print('=' * 70)

        return chords

    else:
        return None
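
# load_midi returns one token group per chord:
#   [delta_time, pat_ptc_token, dur_vel_token, pat_ptc_token, dur_vel_token, ...]
# For example, a two-note chord starting 10 steps (160 ms) after the previous
# one could look like [10, 316, 17030, 324, 17038] (values illustrative).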
#==================================================================================

# ZeroGPU decorator, restored here on the assumption that this app targets
# Hugging Face ZeroGPU Spaces (CUDA is only available inside functions marked
# with @spaces.GPU, which is what the `spaces` import above is for)
@spaces.GPU
def Segment_Song(input_midi,
                 add_monophonic_melody,
                 model_temperature,
                 model_sampling_top_k
                 ):

    #===============================================================================

    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()
    print('=' * 70)

    print('=' * 70)
    print('Requested settings:')
    print('=' * 70)
    print('Add monophonic melody:', add_monophonic_melody)
    print('Model temperature:', model_temperature)
    print('Model top k:', model_sampling_top_k)
    print('=' * 70)

    #==================================================================

    if input_midi is not None:

        # The file name is read only after the None check so that an empty
        # request cannot crash os.path.basename()
        fn = os.path.basename(input_midi)
        fn1 = fn.split('.')[0]

        print('Input MIDI file name:', fn)
        print('=' * 70)

        print('Loading MIDI...')

        chords = load_midi(input_midi.name, add_monophonic_melody)

        if chords is not None:

            print('Sample score chord', chords[0])

            #==================================================================

            print('=' * 70)
            print('Segmenting...')

            segments = []

            # Prime the model with the segment-start token followed by the first chord
            melody_chords = [18816] + chords[0]
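
            # The loop below grows the token sequence one chord at a time and,
            # after each chord, samples a single token from the model. A sampled
            # token of 18818 is taken as a segment boundary: the accumulated
            # sequence is stored as a finished segment and a new sequence is
            # started from the current chord.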
            for chord in tqdm.tqdm(chords[1:]):

                x = torch.LongTensor(melody_chords).cuda()

                with ctx:
                    # top_k here is the logit-filtering function that comes in
                    # via the x_transformer_2_3_1 star import
                    out = model.generate(x,
                                         1,
                                         temperature=model_temperature,
                                         filter_logits_fn=top_k,
                                         filter_kwargs={'k': model_sampling_top_k},
                                         return_prime=False,
                                         verbose=False)

                y = out.tolist()[0]

                if y == 18818:
                    segments.append(melody_chords)

                    melody_chords = [18816]
                    melody_chords.extend(chord)
                    melody_chords[0] = 0

                else:
                    melody_chords.extend(chord)
            #==================================================================

            # Append the final, unterminated segment so the tail of the song
            # is not dropped when two or more boundaries were found
            segments.append(melody_chords)

            if len(segments) < 2:
                segments = [TMIDIX.flatten(chords)]

            #==================================================================

            print('=' * 70)
            print('Done!')
            print('=' * 70)

            print('Song was split into', len(segments), 'segments')
            print('=' * 70)
            #===============================================================================

            print('Rendering results...')
            print('=' * 70)

            #===============================================================================
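            # Token decoding (the inverse of load_midi's encoding):
            #   0..255       -> delta time, restored as ss * 16 ms
            #   256..16767   -> patch = (ss-256) // 128, pitch = (ss-256) % 128;
            #                   patch 128 is drums (forced to MIDI channel 9),
            #                   other patches get free channels assigned on demand
            #   16768..18815 -> duration = ((ss-16768) // 8) * 16 ms,
            #                   velocity = (((ss-16768) % 8) + 1) * 15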
            all_songs = []

            for song in segments:

                song_f = []

                time = 0
                dur = 1
                vel = 90
                pitch = 60
                channel = 0
                patch = 0

                patches = [-1] * 16
                channels = [0] * 16
                channels[9] = 1

                for ss in song:

                    if 0 <= ss < 256:
                        time += ss * 16

                    if 256 <= ss < 16768:
                        patch = (ss-256) // 128

                        if patch < 128:
                            if patch not in patches:
                                if 0 in channels:
                                    cha = channels.index(0)
                                    channels[cha] = 1
                                else:
                                    cha = 15

                                patches[cha] = patch
                                channel = patches.index(patch)
                            else:
                                channel = patches.index(patch)

                        if patch == 128:
                            channel = 9

                        pitch = (ss-256) % 128

                    if 16768 <= ss < 18816:
                        dur = ((ss-16768) // 8) * 16
                        vel = (((ss-16768) % 8)+1) * 15

                        song_f.append(['note', time, dur, channel, pitch, vel, patch])

                all_songs.append(song_f)
            #==================================================================================

            if len(all_songs) > 1:
                medley = TMIDIX.escore_notes_medley(all_songs, pause_time_value=8000)
            else:
                medley = all_songs[0]
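
            # The medley joins the decoded segments back to back; pause_time_value
            # is presumably in milliseconds (consistent with the rest of this
            # script), i.e. an audible 8-second gap between segments in the preview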
            #==================================================================================

            output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(medley)

            fn1 = "Orpheus-Music-Segmentator-Composition"

            detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
                                                                      output_signature = 'Orpheus Music Segmentator',
                                                                      output_file_name = fn1,
                                                                      track_name='Project Los Angeles',
                                                                      list_of_MIDI_patches=patches
                                                                      )

            new_fn = fn1+'.mid'

            audio = midi_to_colab_audio(new_fn,
                                        soundfont_path=SOUNDFONT_PATH,
                                        sample_rate=16000,
                                        output_for_gradio=True
                                        )

            print('Done!')
            print('=' * 70)

            #========================================================

            output_midi = str(new_fn)
            output_audio = (16000, audio)
            output_plot = TMIDIX.plot_ms_SONG(output_score,
                                              plot_title=output_midi,
                                              return_plt=True
                                              )

            print('Output MIDI file name:', output_midi)
            print('=' * 70)

            #========================================================

        else:
            return None, None, None

        print('-' * 70)
        print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
        print('-' * 70)
        print('Req execution time:', (reqtime.time() - start_time), 'sec')

        return output_audio, output_plot, output_midi

    else:
        return None, None, None
#==================================================================================

PDT = timezone('US/Pacific')

print('=' * 70)
print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
print('=' * 70)
#==================================================================================

with gr.Blocks() as demo:

    #==================================================================================

    gr.Markdown("<h1 style='text-align: left; margin-bottom: 1rem'>Orpheus Music Segmentator</h1>")
    gr.Markdown("<h1 style='text-align: left; margin-bottom: 1rem'>Segment any song into coherent separate parts</h1>")

    gr.HTML("""
    <p>
        <a href="https://huggingface.co/spaces/projectlosangeles/Orpheus-Music-Segmentator?duplicate=true">
            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-md.svg" alt="Duplicate in Hugging Face">
        </a>
        for faster execution and endless generation!
    </p>
    """)

    #==================================================================================

    gr.Markdown("## Upload a source MIDI or select a sample MIDI at the bottom of the page")
    gr.Markdown("### For best results, upload a MIDI with at least one monophonic melody!")

    input_midi = gr.File(label="Input MIDI",
                         file_types=[".midi", ".mid", ".kar"]
                         )

    gr.Markdown("## Segmentation options")

    add_monophonic_melody = gr.Checkbox(value=False, label="Add monophonic melody")
    model_temperature = gr.Slider(0.1, 1.0, value=1.0, step=0.01, label="Model temperature")
    model_sampling_top_k = gr.Slider(1, 15, value=1, step=1, label="Model sampling top-k value")

    generate_btn = gr.Button("Segment", variant="primary")

    gr.Markdown("## Segmentation results")

    output_audio = gr.Audio(label="MIDI audio", format="wav", elem_id="midi_audio")
    output_plot = gr.Plot(label="MIDI score plot")
    output_midi = gr.File(label="MIDI file", file_types=[".mid"])
    generate_btn.click(Segment_Song,
                       [input_midi,
                        add_monophonic_melody,
                        model_temperature,
                        model_sampling_top_k
                        ],
                       [output_audio,
                        output_plot,
                        output_midi
                        ]
                       )

    gr.Examples(
        [["All Out of Love.mid", False, 1.0, 2],
         ["POP909_001.mid", True, 1.0, 2],
         ["Sharing The Night Together.kar", False, 1.0, 2]
         ],
        [input_midi,
         add_monophonic_melody,
         model_temperature,
         model_sampling_top_k
         ],
        [output_audio,
         output_plot,
         output_midi
         ],
        Segment_Song
        )

#==================================================================================

demo.launch()

#==================================================================================