#=================================================================
# https://huggingface.co/spaces/asigalov61/Orpheus-MIDI-Comparator
#=================================================================

print('=' * 70)
print('Orpheus MIDI Comparator Gradio App')
print('=' * 70)
print('Loading Orpheus MIDI Comparator modules...')

import os

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import time as reqtime
import datetime
from pytz import timezone

import torch

torch.set_float32_matmul_precision('high')
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
torch.backends.cuda.enable_mem_efficient_sdp(True)
torch.backends.cuda.enable_math_sdp(True)
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_cudnn_sdp(True)

from huggingface_hub import hf_hub_download

import spaces
import gradio as gr

from x_transformer_2_3_1 import *

import random
import tqdm

from midi_to_colab_audio import midi_to_colab_audio

import TMIDIX

import matplotlib.pyplot as plt

from sklearn.metrics import pairwise

import numpy as np

print('Done!')
print('=' * 70)

# =================================================================================================

MODEL_CHECKPOINT = 'Orpheus_Music_Transformer_Trained_Model_128497_steps_0.6934_loss_0.7927_acc.pth'
SOUNDFONT_PATH = 'SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2'

DEVICE = 'cuda'

SEP = '=' * 70

# =================================================================================================

def print_sep():
    print(SEP)

# =================================================================================================

def hsv_to_rgb(h, s, v):

    if s == 0.0:
        return v, v, v

    i = int(h*6.0)
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    i = i % 6

    return [(v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q)][i]

def generate_colors(n):
    return [hsv_to_rgb(i/n, 1, 1) for i in range(n)]

def add_arrays(a, b):
    return [sum(pair) for pair in zip(a, b)]

def plot_ms_SONG(ms_song,
                 preview_length_in_notes=0,
                 block_lines_times_list=None,
                 plot_title='ms Song',
                 max_num_colors=129,
                 drums_color_num=128,
                 plot_size=(11, 4),
                 note_height=0.75,
                 show_grid_lines=False,
                 return_plt=False,
                 timings_multiplier=1,
                 plot_curve_values=None,
                 plot_curve_notes_step=200,
                 save_plot=''
                 ):

    '''Tegridy ms SONG plotter/vizualizer'''

    notes = [s for s in ms_song if s[0] == 'note']

    if (len(max(notes, key=len)) != 7) and (len(min(notes, key=len)) != 7):
        print('The song notes do not have patches information')
        print('Please add patches to the notes in the song')

    else:

        start_times = [(s[1] * timings_multiplier) / 1000 for s in notes]
        durations = [(s[2] * timings_multiplier) / 1000 for s in notes]
        pitches = [s[4] for s in notes]
        patches = [s[6] for s in notes]

        colors = generate_colors(max_num_colors)
        colors[drums_color_num] = (1, 1, 1)

        pbl = (notes[preview_length_in_notes][1] * timings_multiplier) / 1000

        fig, ax = plt.subplots(figsize=plot_size)

        # Create a rectangle for each note with color based on patch number
        for start, duration, pitch, patch in zip(start_times, durations, pitches, patches):
            rect = plt.Rectangle((start, pitch), duration, note_height, facecolor=colors[patch])
            ax.add_patch(rect)

        if plot_curve_values is not None:
            stimes = start_times[plot_curve_notes_step // 2::plot_curve_notes_step]
            min_val = min(plot_curve_values)
            max_val = max(plot_curve_values)
            spcva = [((value - min_val) / (max(max_val - min_val, 0.00001))) * 100 for value in plot_curve_values]

            ax.plot(stimes[:len(spcva)], spcva[:len(stimes)], marker='o', linestyle='-', color='w')
        # Set the limits of the plot
        ax.set_xlim([min(start_times), max(add_arrays(start_times, durations))])

        # Use the normalized similarity-curve range for the y-axis when a curve is plotted;
        # otherwise fall back to the pitch range (spcva is undefined in that case)
        if plot_curve_values is not None:
            ax.set_ylim([min(spcva), max(spcva)])
        else:
            ax.set_ylim([min(pitches)-1, max(pitches)+1])

        # Set the background color to black
        ax.set_facecolor('black')
        fig.patch.set_facecolor('white')

        if preview_length_in_notes > 0:
            ax.axvline(x=pbl, c='white')

        if block_lines_times_list:
            for bl in block_lines_times_list:
                ax.axvline(x=bl, c='white')

        if show_grid_lines:
            ax.grid(color='white')

        plt.xlabel('Time (s)', c='black')
        plt.ylabel('MIDI Pitch', c='black')

        plt.title(plot_title)

        if return_plt:
            return fig

        if save_plot == '':
            plt.show()
        else:
            plt.savefig(save_plot)

# =================================================================================================

def read_MIDI(input_midi,
              apply_sustains=True,
              remove_duplicate_pitches=True,
              remove_overlapping_durations=True
              ):

    """Process the input MIDI file and create a token sequence."""

    raw_score = TMIDIX.midi2single_track_ms_score(input_midi)

    escore_notes = TMIDIX.advanced_score_processor(raw_score,
                                                   return_enhanced_score_notes=True,
                                                   apply_sustain=apply_sustains
                                                   )

    if escore_notes:

        escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes[0],
                                                           sort_drums_last=True
                                                           )

        if remove_duplicate_pitches:
            escore_notes = TMIDIX.remove_duplicate_pitches_from_escore_notes(escore_notes)

        if remove_overlapping_durations:
            escore_notes = TMIDIX.fix_escore_notes_durations(escore_notes,
                                                             min_notes_gap=0
                                                             )

        dscore = TMIDIX.delta_score_notes(escore_notes)

        dcscore = TMIDIX.chordify_score([d[1:] for d in dscore])

        melody_chords = [18816]
        melody_chords2 = []

        #=======================================================
        # MAIN PROCESSING CYCLE
        #=======================================================

        for i, c in enumerate(dcscore):

            delta_time = c[0][0]

            melody_chords.append(delta_time)

            for e in c:

                #=======================================================
                # Durations
                dur = max(1, min(255, e[1]))

                # Patches
                pat = max(0, min(128, e[5]))

                # Pitches
                ptc = max(1, min(127, e[3]))

                # Velocities
                # Calculating octo-velocity
                vel = max(8, min(127, e[4]))
                velocity = round(vel / 15)-1

                #=======================================================
                # FINAL NOTE SEQ
                #=======================================================

                # Writing final note
                pat_ptc = (128 * pat) + ptc
                dur_vel = (8 * dur) + velocity

                melody_chords.extend([pat_ptc+256, dur_vel+16768])
                melody_chords2.append([pat_ptc+256, dur_vel+16768])

        return melody_chords, melody_chords2

# =================================================================================================

def tokens_to_MIDI(tokens, MIDI_name):

    print('Rendering results...')

    print('=' * 70)
    print('Sample INTs', tokens[:12])
    print('=' * 70)

    if len(tokens) != 0:

        song = tokens
        song_f = []

        time = 0
        dur = 1
        vel = 90
        pitch = 60
        channel = 0
        patch = 0

        patches = [-1] * 16
        channels = [0] * 16
        channels[9] = 1

        for ss in tokens:

            if 0 <= ss < 256:
                time += ss * 16

            if 256 <= ss < 16768:

                patch = (ss-256) // 128

                if patch < 128:

                    if patch not in patches:
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:
                    channel = 9

                pitch = (ss-256) % 128

            if 16768 <= ss < 18816:

                dur = ((ss-16768) // 8) * 16
                vel = (((ss-16768) % 8)+1) * 15

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        patches = [0 if x == -1 else x for x in patches]

        output_score, patches, overflow_patches = TMIDIX.patch_enhanced_score_notes(song_f)
        detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
                                                                  output_signature='Orpheus MIDI Comparator',
                                                                  output_file_name=MIDI_name,
                                                                  track_name='Project Los Angeles',
                                                                  list_of_MIDI_patches=patches
                                                                  )

        new_fn = MIDI_name+'.mid'

        audio = midi_to_colab_audio(new_fn,
                                    soundfont_path=SOUNDFONT_PATH,
                                    sample_rate=16000,
                                    volume_scale=10,
                                    output_for_gradio=True
                                    )

        print('Done!')
        print('=' * 70)

        return new_fn, output_score, audio

# =================================================================================================

def logsumexp_pooling(x, dim=1, keepdim=False):
    max_val, _ = torch.max(x, dim=dim, keepdim=True)
    lse = max_val + torch.log(torch.mean(torch.exp(x - max_val), dim=dim, keepdim=keepdim) + 1e-10)
    return lse

# =================================================================================================

def gem_pooling(x, p=3.0, eps=1e-6):
    pooled = torch.mean(x ** p, dim=1)
    return pooled.clamp(min=eps).pow(1 / p)

# =================================================================================================

def median_pooling(x, dim=1):
    return torch.median(x, dim=dim).values

# =================================================================================================

def rms_pooling(x, dim=1):
    return torch.sqrt(torch.mean(x ** 2, dim=dim) + 1e-6)

# =================================================================================================

def get_embeddings(inputs):

    with ctx:
        with torch.no_grad():

            out = model(inputs, return_outputs=True)

            cache = out[3]

            hidden = cache.layer_hiddens[-1]

            mean_pool = torch.mean(hidden, dim=1)
            max_pool = torch.max(hidden, dim=1).values
            lse_pool = logsumexp_pooling(hidden, dim=1)
            gem_pool = gem_pooling(hidden, p=3.0)
            median_pool = median_pooling(hidden, dim=1)
            rms_pool = rms_pooling(hidden, dim=1)

            # Alternative pooled embedding (currently unused, see commented return below)
            concat_pool = torch.cat((mean_pool,
                                     max_pool,
                                     lse_pool[0][:, :512],
                                     gem_pool[:, :512],
                                     median_pool[:, :512],
                                     rms_pool[:, :512]
                                     ), dim=1)

    # return concat_pool.cpu().detach().numpy()[0]

    # Use the flattened last-layer hidden states of the chunk as its embedding
    return hidden.cpu().detach().numpy()[0].flatten()

# =================================================================================================

print('Loading Orpheus Music Transformer model...')

dtype = 'bfloat16'
ptdtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = torch.amp.autocast(device_type=DEVICE, dtype=ptdtype)

SEQ_LEN = 8192
PAD_IDX = 18819

model = TransformerWrapper(num_tokens=PAD_IDX + 1,
                           max_seq_len=SEQ_LEN,
                           attn_layers=Decoder(dim=2048,
                                               depth=8,
                                               heads=32,
                                               rotary_pos_emb=True,
                                               attn_flash=True
                                               )
                           )

model = AutoregressiveWrapper(model, ignore_index=PAD_IDX, pad_value=PAD_IDX)

print_sep()
print("Loading model checkpoint...")

checkpoint = hf_hub_download(repo_id='asigalov61/Orpheus-Music-Transformer',
                             filename=MODEL_CHECKPOINT
                             )

model.load_state_dict(torch.load(checkpoint, map_location=DEVICE, weights_only=True))

model.to(DEVICE)
model.eval()

print_sep()
print("Done!")
print("Model will use", dtype, "precision...")
print('Model will use', DEVICE, 'for inference...')
print_sep()

# =================================================================================================

@spaces.GPU
def CompareMIDIs(input_src_midi, input_trg_midi, input_sampling_resolution, input_sampling_overlap):

    if input_src_midi is not None and input_trg_midi is not None:

        print('=' * 70)
        print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
        start_time = reqtime.time()
        print('Done!')
        print('=' * 70)

        sfn = os.path.basename(input_src_midi.name)
        sfn1 = sfn.split('.')[0]

        tfn = os.path.basename(input_trg_midi.name)
        tfn1 = tfn.split('.')[0]
        print('-' * 70)
        print('Input src MIDI name:', sfn)
        print('Input trg MIDI name:', tfn)
        print('Req sampling resolution:', input_sampling_resolution)
        print('Req sampling overlap:', input_sampling_overlap)
        print('-' * 70)

        #===============================================================================

        print('Loading MIDIs...')

        src_tokens, src_notes = read_MIDI(input_src_midi.name)
        trg_tokens, trg_notes = read_MIDI(input_trg_midi.name)

        #==================================================================

        print('=' * 70)
        print('Number of src tokens:', len(src_tokens))
        print('Number of src notes:', len(src_notes))
        print('Number of trg tokens:', len(trg_tokens))
        print('Number of trg notes:', len(trg_notes))

        #==========================================================================

        print('=' * 70)
        print('Comparing...')

        print('=' * 70)
        print('Orpheus MIDI Comparator')
        print('=' * 70)

        avg_toks_to_notes_ratio = ((len(src_tokens) / len(src_notes)) + (len(trg_tokens) / len(trg_notes))) / 2

        print('Average tokens to notes ratio:', avg_toks_to_notes_ratio)
        print('=' * 70)

        # Convert the requested note-based resolution/overlap into token counts
        sampling_resolution = int(max(40, min(1000, input_sampling_resolution)) * avg_toks_to_notes_ratio)
        sampling_overlap = int(max(0, min(500, input_sampling_overlap)) * avg_toks_to_notes_ratio)

        # Truncate both token sequences to a common length that is a whole multiple of the sampling resolution
        comp_length = (min(len(src_tokens), len(trg_tokens)) // sampling_resolution) * sampling_resolution

        input_src_tokens = src_tokens[:comp_length]
        input_trg_tokens = trg_tokens[:comp_length]

        comp_cos_sims = []

        # torch.cuda.empty_cache()

        for i in range(0, comp_length, max(1, sampling_resolution-sampling_overlap)):

            inp = [input_src_tokens[i:i+sampling_resolution]]
            inp = torch.LongTensor(inp).to(DEVICE)

            src_embeddings = get_embeddings(inp)

            inp = [input_trg_tokens[i:i+sampling_resolution]]
            inp = torch.LongTensor(inp).to(DEVICE)

            trg_embeddings = get_embeddings(inp)

            cos_sim = pairwise.cosine_similarity([src_embeddings.flatten()],
                                                 [trg_embeddings.flatten()]
                                                 ).tolist()[0][0]

            comp_cos_sims.append(cos_sim)

        output_min_sim = min(comp_cos_sims)
        output_avg_sim = sum(comp_cos_sims) / len(comp_cos_sims)
        output_max_sim = max(comp_cos_sims)

        print('Min sim:', output_min_sim)
        print('Avg sim:', output_avg_sim)
        print('Max sim:', output_max_sim)

        print('=' * 70)
        print('Done!')
        print('=' * 70)

        #===============================================================================

        print('Rendering results...')

        sname, ssong_f, saudio = tokens_to_MIDI(src_tokens[:comp_length], sfn1)
        tname, tsong_f, taudio = tokens_to_MIDI(trg_tokens[:comp_length], tfn1)

        #========================================================

        output_src_audio = (16000, saudio)

        output_src_plot = plot_ms_SONG(ssong_f,
                                       plot_title=sfn1,
                                       plot_curve_values=comp_cos_sims,
                                       plot_curve_notes_step=max(1, int((sampling_resolution-sampling_overlap) / avg_toks_to_notes_ratio)),
                                       return_plt=True
                                       )

        output_trg_audio = (16000, taudio)

        output_trg_plot = plot_ms_SONG(tsong_f,
                                       plot_title=tfn1,
                                       plot_curve_values=comp_cos_sims,
                                       plot_curve_notes_step=max(1, int((sampling_resolution-sampling_overlap) / avg_toks_to_notes_ratio)),
                                       return_plt=True
                                       )

        print('Done!')
        print('=' * 70)

        #========================================================

        print('-' * 70)
        print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
        print('-' * 70)
        print('Req execution time:', (reqtime.time() - start_time), 'sec')

        return output_src_audio, output_src_plot, output_trg_audio, output_trg_plot, output_min_sim, output_avg_sim, output_max_sim

    else:
        return None, None, None, None, None, None, None

# =================================================================================================
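# -------------------------------------------------------------------------------------------------
# Token vocabulary used by read_MIDI() / tokens_to_MIDI(), summarized from the code above:
#
#   0     - 255   : delta start-time, in 16 ms steps
#   256   - 16767 : note token  -> (128 * patch) + pitch + 256  (patch 128 == drums, mapped to channel 9)
#   16768 - 18815 : dur/vel token -> (8 * duration) + velocity_bucket + 16768  (duration in 16 ms steps)
#   18816         : sequence start token (prepended by read_MIDI)
#
# Minimal usage sketch of the comparison pipeline (illustrative only; the Gradio handler
# CompareMIDIs() above is what the app actually runs, and 'my_src.mid' / 'my_trg.mid' are
# placeholder file names; both chunks must have the same length):
#
#   src_tokens, _ = read_MIDI('my_src.mid')
#   trg_tokens, _ = read_MIDI('my_trg.mid')
#   src_emb = get_embeddings(torch.LongTensor([src_tokens[:512]]).to(DEVICE))
#   trg_emb = get_embeddings(torch.LongTensor([trg_tokens[:512]]).to(DEVICE))
#   sim = pairwise.cosine_similarity([src_emb], [trg_emb])[0][0]
# -------------------------------------------------------------------------------------------------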
PDT = timezone('US/Pacific')

print('=' * 70)
print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
print('=' * 70)

app = gr.Blocks()

with app:

    gr.Markdown("