Upload 12 files
Browse files- CornetAI_SavedModel/keras_metadata.pb +3 -0
- CornetAI_SavedModel/saved_model.pb +3 -0
- CornetAI_SavedModel/variables/variables.data-00000-of-00001 +0 -0
- CornetAI_SavedModel/variables/variables.index +0 -0
- models/CornetaAI.h5 +3 -0
- readme.txt +0 -0
- src/calificador.py +154 -0
- src/entrenar.py +143 -0
- src/evaluador.py +85 -0
- src/generadores/generador_maestro_midis.py +108 -0
- src/generadores/generadoraudio.py +47 -0
- src/generadores/genmidisreales.py +54 -0
CornetAI_SavedModel/keras_metadata.pb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:69cb17ed575d1ebf501ab47830662ea63d84fcc81403e805869d3e3b619ca519
|
| 3 |
+
size 44482
|
CornetAI_SavedModel/saved_model.pb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:517731222471a9be9a9e7ee66f3d1946efc32fb948da6aa199490fdc5e2f049c
|
| 3 |
+
size 783876
|
CornetAI_SavedModel/variables/variables.data-00000-of-00001
ADDED
|
Binary file (84.3 kB). View file
|
|
|
CornetAI_SavedModel/variables/variables.index
ADDED
|
Binary file (1.61 kB). View file
|
|
|
models/CornetaAI.h5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c3e5cdfe18c7ae75059174ea221ed24add93a8ab27ab7356f3871ec016f749e0
|
| 3 |
+
size 306636
|
readme.txt
ADDED
|
File without changes
|
src/calificador.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pretty_midi
|
| 5 |
+
import mir_eval
|
| 6 |
+
import matplotlib.pyplot as plt
|
| 7 |
+
from basic_pitch.inference import predict
|
| 8 |
+
|
| 9 |
+
# --- CONFIGURATION ---
MODEL_PATH = "CornetAI_SavedModel"   # fine-tuned basic-pitch SavedModel directory
ONSET_TOL = 0.150                    # seconds: onset tolerance when matching notes
CEILING_F1 = 0.858                   # F1 that maps to a perfect 10/10 score
|
| 13 |
+
|
| 14 |
+
def get_friendly_score(f1_value, ceiling=None):
    """Map an F1 value onto a friendly 0-10 score.

    Args:
        f1_value: onset-only F-measure, expected in [0, 1].
        ceiling: F1 that corresponds to a perfect 10. Defaults to the
            module-level CEILING_F1 (added as a parameter so alternative
            ceilings can be evaluated without touching the module constant).

    Returns:
        Score rounded to one decimal, capped at 10.0.
    """
    if ceiling is None:
        ceiling = CEILING_F1
    score = (f1_value / ceiling) * 10
    return min(10.0, round(score, 1))
|
| 17 |
+
|
| 18 |
+
def midi_a_espanol_corneta(midi_num):
    """Return the simplified Spanish (solfège) note name for a MIDI pitch.

    The cornet repertoire here folds accidentals onto their natural names
    (e.g. both 68/Ab and 69/A map to 'La').

    Note: the original implementation kept a `mapping` dict whose entries
    were identical to `nombres_base` at the same indices — dead code, removed.
    """
    nombres_base = ['Do', 'Re', 'Re', 'Re', 'Mi', 'Fa', 'Fa', 'Sol', 'La', 'La', 'Si', 'Si']
    return nombres_base[midi_num % 12]
|
| 24 |
+
|
| 25 |
+
def plot_piano_rolls(pm_ref, pm_est, puntuacion, fallos_texto, output_filename):
    """Render reference vs. estimated piano rolls side by side and save a PNG report.

    Args:
        pm_ref: PrettyMIDI-like object with the reference score (ground truth).
        pm_est: PrettyMIDI-like object with the transcribed performance.
        puntuacion: 0-10 score shown in the report panel.
        fallos_texto: multi-line feedback text listing missed notes.
        output_filename: source audio filename; ".wav" becomes "_resultado.png".
    """
    fig = plt.figure(figsize=(15, 8))
    # Axes occupy the left 60% of the figure; the right side holds the text panel.
    ax = fig.add_axes([0.1, 0.1, 0.6, 0.8])

    # 1. REFERENCE MIDI (green, outlined) — label only the first bar for the legend.
    for i, note in enumerate(pm_ref.instruments[0].notes):
        ax.barh(note.pitch, note.end - note.start, left=note.start,
                height=0.4, color='green', alpha=0.3,
                edgecolor='black', linewidth=1.5,
                label="Partitura (Referencia)" if i == 0 else "")

    # 2. PERFORMANCE MIDI (blue, semi-transparent so overlap with green is visible).
    for i, note in enumerate(pm_est.instruments[0].notes):
        ax.barh(note.pitch, note.end - note.start, left=note.start,
                height=0.4, color='blue', alpha=0.5,
                label="Tu Ejecución (CornetAI)" if i == 0 else "")

    # Collect every MIDI pitch used and label the y-axis in Spanish note names.
    all_pitches = set()
    for note in pm_ref.instruments[0].notes:
        all_pitches.add(note.pitch)
    for note in pm_est.instruments[0].notes:
        all_pitches.add(note.pitch)

    all_pitches = sorted(all_pitches)
    ytick_labels = [midi_a_espanol_corneta(p) for p in all_pitches]

    ax.set_yticks(all_pitches)
    ax.set_yticklabels(ytick_labels)

    ax.set_xlabel("Tiempo (segundos)")
    ax.set_ylabel("Nota")
    ax.set_title(f"Informe de Evaluación CornetAI - {output_filename}")
    ax.legend(loc='upper left')
    ax.grid(axis='x', linestyle='--', alpha=0.3)

    # 3. TEXT PANEL (score + corrections report) on the right-hand side.
    info_text = f"PUNTUACIÓN: {puntuacion}/10\n\n"
    info_text += "CORRECCIONES:\n"
    info_text += fallos_texto

    fig.text(0.72, 0.85, " RESULTADOS", fontsize=16, fontweight='bold', color='darkblue')
    fig.text(0.72, 0.5, info_text, fontsize=12, va='center',
             bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))

    img_name = output_filename.replace(".wav", "_resultado.png")
    plt.savefig(img_name, dpi=300, bbox_inches='tight')
    print(f"✅ Imagen de resultados guardada como: {img_name}")

    plt.show()
|
| 76 |
+
|
| 77 |
+
def evaluar_ejecucion(audio_path, midi_gt_path):
    """Transcribe *audio_path* with CornetAI and grade it against *midi_gt_path*.

    Pipeline: basic-pitch inference with the fine-tuned model → onset-only
    F-measure via mir_eval → 0-10 score → per-note miss feedback → PNG report
    via plot_piano_rolls. Prints the results; returns None.
    """
    if not os.path.exists(audio_path) or not os.path.exists(midi_gt_path):
        print("❌ Error: Archivos no encontrados.")
        return

    print("Analizando interpretación...")

    try:
        _, midi_data, _ = predict(audio_path, model_or_model_path=MODEL_PATH)
    except Exception as e:
        print(f"Error en la inferencia: {e}")
        return

    pm_ref = pretty_midi.PrettyMIDI(midi_gt_path)
    pm_est = midi_data

    ref_notes = pm_ref.instruments[0].notes
    est_notes = pm_est.instruments[0].notes

    if not est_notes:
        print("No se han detectado notas.")
        return

    # mir_eval expects intervals in seconds and pitches in Hz.
    ref_int = np.array([[n.start, n.end] for n in ref_notes])
    ref_pit = np.array([pretty_midi.note_number_to_hz(n.pitch) for n in ref_notes])
    est_int = np.array([[n.start, n.end] for n in est_notes])
    est_pit = np.array([pretty_midi.note_number_to_hz(n.pitch) for n in est_notes])

    metrics = mir_eval.transcription.evaluate(
        ref_int, ref_pit, est_int, est_pit,
        onset_tolerance=ONSET_TOL, offset_ratio=None
    )

    puntuacion = get_friendly_score(metrics['F-measure_no_offset'])

    # A reference note counts as "missed" when no estimated note starts
    # within ONSET_TOL of it (pitch is deliberately ignored here).
    fallados_idx = []
    for i, ref_note in enumerate(ref_notes):
        tiene_match_temporal = any(
            abs(ref_note.start - est_note.start) <= ONSET_TOL
            for est_note in est_notes
        )
        if not tiene_match_temporal:
            fallados_idx.append(i)

    fallos_lista = []
    print("\n" + "=" * 45)
    print("EVALUACIÓN DE CORNETAI")
    print("=" * 45)
    print(f">> NOTA FINAL: {puntuacion} / 10 <<")
    print("=" * 45)

    MAX_LISTADOS = 8  # cap the written feedback at 8 missed notes
    if fallados_idx:
        for idx in fallados_idx[:MAX_LISTADOS]:
            nota_es = midi_a_espanol_corneta(ref_notes[idx].pitch)
            tiempo = round(ref_notes[idx].start, 2)
            fallos_lista.append(f"- Revisa {nota_es} ({tiempo}s)")
            print(f" - Revisa el {nota_es} en el segundo {tiempo}")

        # BUG FIX: the truncation notice previously compared against 6 and
        # reported "len-6 más" while the list above showed up to 8 entries;
        # both now use MAX_LISTADOS.
        if len(fallados_idx) > MAX_LISTADOS:
            fallos_lista.append(f"... y {len(fallados_idx) - MAX_LISTADOS} más.")
    else:
        fallos_lista.append("¡Interpretación Perfecta!")

    fallos_texto = "\n".join(fallos_lista)

    plot_piano_rolls(pm_ref, pm_est, puntuacion, fallos_texto, os.path.basename(audio_path))
|
| 149 |
+
|
| 150 |
+
if __name__ == "__main__":
    # CLI entry point: expects exactly an audio file and a reference MIDI.
    cli_args = sys.argv[1:]
    if len(cli_args) == 2:
        audio_arg, midi_arg = cli_args
        evaluar_ejecucion(audio_arg, midi_arg)
    else:
        print("Uso: python evaluador_individual.py <audio.wav> <referencia.mid>")
|
src/entrenar.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
import tensorflow as tf
|
| 4 |
+
import pretty_midi
|
| 5 |
+
import librosa
|
| 6 |
+
from basic_pitch.models import model as bp_model
|
| 7 |
+
from basic_pitch import ICASSP_2022_MODEL_PATH
|
| 8 |
+
from sklearn.model_selection import train_test_split
|
| 9 |
+
|
| 10 |
+
# --- PATH CONFIGURATION ---
PATH_DATASET_WAV = "dataset_wavs"    # input audio clips
PATH_DATASET_MIDI = "dataset_midis"  # matching ground-truth MIDIs (same basename)
MODEL_SAVE_PATH = "CornetaAI.h5"     # best-checkpoint output

# Filenames containing this substring are treated as real recordings;
# everything else is considered synthetic.
FILTRO_REAL = "ejercicio"

# Technical parameters
SAMPLE_RATE = 22050
SAMPLES_PER_CLIP = 43844                  # fixed clip length fed to the model (~1.99 s)
HOP_LENGTH = 256
ANNOTATIONS_FPS = SAMPLE_RATE / HOP_LENGTH  # target frames per second
N_FREQ_BINS = 88                          # note/onset heads: one bin per piano key
N_FREQ_BINS_CONTOUR = 264                 # contour head: 3 bins per semitone
TRAIN_DURATION = SAMPLES_PER_CLIP / SAMPLE_RATE

# Tuned hyperparameters
BATCH_SIZE = 8
EPOCHS = 50
LEARNING_RATE = 0.0003
|
| 31 |
+
|
| 32 |
+
# --- CLASS-BALANCED LOSS FUNCTION (95/5) ---
def weighted_binary_crossentropy(y_true, y_pred):
    """Binary cross-entropy weighting positives at 0.95 and negatives at 0.05.

    Onset/note targets are extremely sparse, so the positive class is
    up-weighted to keep the model from collapsing to all-zero predictions.
    """
    epsilon = tf.keras.backend.epsilon()
    # Clip predictions away from 0/1 to avoid log(0).
    y_pred = tf.clip_by_value(y_pred, epsilon, 1.0 - epsilon)
    loss = -(0.95 * y_true * tf.math.log(y_pred) + 0.05 * (1.0 - y_true) * tf.math.log(1.0 - y_pred))
    return tf.reduce_mean(loss)
|
| 38 |
+
|
| 39 |
+
def get_data(wav, midi, frames_output, augment=False):
    """Load one (audio, targets) training example, optionally pitch-shift augmented.

    Args:
        wav: path to the audio file.
        midi: path to the aligned ground-truth MIDI.
        frames_output: number of time frames the model's heads produce.
        augment: apply random ±0.5-semitone pitch shift plus light noise.

    Returns:
        (audio[:, None], {"note", "onset", "contour"} target dict), or
        (None, None) on any failure — best-effort: the generator simply
        drops examples that fail to load.
    """
    try:
        audio, _ = librosa.load(wav, sr=SAMPLE_RATE)
        if len(audio) < SAMPLES_PER_CLIP:
            audio = np.pad(audio, (0, SAMPLES_PER_CLIP - len(audio)))

        # Random crop of exactly SAMPLES_PER_CLIP samples.
        max_start = len(audio) - SAMPLES_PER_CLIP
        start_sample = np.random.randint(0, max_start + 1)
        audio_crop = audio[start_sample : start_sample + SAMPLES_PER_CLIP]

        if augment:
            # Small detuning + faint Gaussian noise for robustness.
            n_steps = np.random.uniform(-0.5, 0.5)
            audio_crop = librosa.effects.pitch_shift(audio_crop, sr=SAMPLE_RATE, n_steps=n_steps)
            audio_crop += np.random.normal(0, 0.001, audio_crop.shape)

        # Peak-normalize, guarding against silent clips.
        if np.max(np.abs(audio_crop)) > 0:
            audio_crop = audio_crop / np.max(np.abs(audio_crop))

        pm = pretty_midi.PrettyMIDI(midi)
        start_time, end_time = start_sample / SAMPLE_RATE, (start_sample / SAMPLE_RATE) + TRAIN_DURATION

        # One frame-level binary matrix per model head.
        targets = {
            "note": np.zeros((frames_output, N_FREQ_BINS), dtype=np.float32),
            "onset": np.zeros((frames_output, N_FREQ_BINS), dtype=np.float32),
            "contour": np.zeros((frames_output, N_FREQ_BINS_CONTOUR), dtype=np.float32)
        }

        for note in pm.instruments[0].notes:
            # Skip notes entirely outside the cropped window.
            if note.end < start_time or note.start > end_time: continue
            rel_start, rel_end = max(0.0, note.start - start_time), min(TRAIN_DURATION, note.end - start_time)
            s, e = int(rel_start * ANNOTATIONS_FPS), int(rel_end * ANNOTATIONS_FPS)
            p = note.pitch - 21  # MIDI 21 (A0) maps to bin 0 of the 88-key range
            if 0 <= p < N_FREQ_BINS:
                s, e = max(0, min(s, frames_output - 1)), max(0, min(e, frames_output))
                if s < e:
                    targets["note"][s:e, p] = 1.0
                    # Mark an onset only if the note actually starts inside
                    # the crop (3-frame-wide blob around the start frame).
                    if note.start >= start_time:
                        targets["onset"][max(0, s-1):min(frames_output, s+2), p] = 1.0
                    # Contour head: 3 bins per semitone; center bin hot,
                    # immediate neighbours at half intensity.
                    c = p * 3 + 1
                    targets["contour"][s:e, c] = 1.0
                    if c > 0: targets["contour"][s:e, c-1] = 0.5
                    if c < 263: targets["contour"][s:e, c+1] = 0.5
        return audio_crop[:, np.newaxis], targets
    except Exception: return None, None
|
| 84 |
+
|
| 85 |
+
class MasterGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (audio_batch, target_dict) pairs via get_data.

    Examples that fail to load are silently dropped, so a batch can be
    smaller than BATCH_SIZE (or even empty if every file in it fails).
    Indices are reshuffled at the end of every epoch.
    """
    def __init__(self, wavs, midis, frames_output, augment=False):
        self.wavs, self.midis, self.frames_output, self.augment = wavs, midis, frames_output, augment
        self.indices = np.arange(len(self.wavs))
        self.on_epoch_end()  # shuffle once before the first epoch
    def __len__(self): return int(np.ceil(len(self.wavs)/BATCH_SIZE))
    def __getitem__(self, idx):
        # Slice the shuffled index array for this batch.
        batch_indices = self.indices[idx * BATCH_SIZE : (idx + 1) * BATCH_SIZE]
        audios, notes, onsets, contours = [], [], [], []
        for i in batch_indices:
            a, t = get_data(self.wavs[i], self.midis[i], self.frames_output, augment=self.augment)
            if a is not None:
                audios.append(a); notes.append(t["note"]); onsets.append(t["onset"]); contours.append(t["contour"])
        return np.array(audios), {"note": np.array(notes), "onset": np.array(onsets), "contour": np.array(contours)}
    def on_epoch_end(self): np.random.shuffle(self.indices)
|
| 100 |
+
|
| 101 |
+
if __name__ == "__main__":
    print("--- FINE-TUNING: VALIDACIÓN REAL PURA (25 Train / 10 Val) ---")

    # 1. Load files and split them into real vs. synthetic by filename.
    all_wavs = [f for f in os.listdir(PATH_DATASET_WAV) if f.endswith(".wav")]
    wav_real, midi_real, wav_sint, midi_sint = [], [], [], []

    for w in all_wavs:
        m = w.replace(".wav", ".mid")
        # Only keep WAVs that have a matching ground-truth MIDI.
        if os.path.exists(os.path.join(PATH_DATASET_MIDI, m)):
            path_w, path_m = os.path.join(PATH_DATASET_WAV, w), os.path.join(PATH_DATASET_MIDI, m)
            if FILTRO_REAL.lower() in w.lower():
                wav_real.append(path_w); midi_real.append(path_m)
            else:
                wav_sint.append(path_w); midi_sint.append(path_m)

    # 2. Split the real takes: 10 held out for validation, rest for training.
    tr_rw, val_w, tr_rm, val_m = train_test_split(wav_real, midi_real, test_size=10, random_state=42)

    # 3. Training mix: all synthetic clips plus the real training takes.
    train_w, train_m = wav_sint + tr_rw, midi_sint + tr_rm

    # 4. Model setup: start from the pretrained ICASSP 2022 basic-pitch weights.
    model = bp_model()
    model.load_weights(ICASSP_2022_MODEL_PATH)
    # Run a dummy batch to discover the frame count of the 'note' head.
    frames_out = model(np.zeros((1, SAMPLES_PER_CLIP, 1))).get('note').shape[1]

    # Freeze the CQT front-end layers; fine-tune everything else.
    for l in model.layers: l.trainable = 'cqt' not in l.name
    model.compile(optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),
                  loss={"note": "binary_crossentropy", "onset": weighted_binary_crossentropy, "contour": "binary_crossentropy"},
                  loss_weights={"note": 1.0, "onset": 1.5, "contour": 0.5})

    train_gen = MasterGenerator(train_w, train_m, frames_out, augment=True)
    val_gen = MasterGenerator(val_w, val_m, frames_out, augment=False)

    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=8, restore_best_weights=True),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3),
        tf.keras.callbacks.ModelCheckpoint(MODEL_SAVE_PATH, monitor='val_loss', save_best_only=True)
    ]

    print(f"📊 Train: {len(train_w)} (500 sint + 25 real) | Val: {len(val_w)} (10 real).")
    model.fit(train_gen, validation_data=val_gen, epochs=EPOCHS, callbacks=callbacks)
|
src/evaluador.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pretty_midi
|
| 3 |
+
import mir_eval
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
|
| 7 |
+
# --- CONFIGURATION ---
DIR_GT = "dataset_evaluación/midis_gt"                          # ground-truth MIDIs
DIR_PRED_MY = "dataset_evaluación/resultados_midis/cornetai"    # CornetAI predictions
DIR_PRED_OFF = "dataset_evaluación/resultados_midis/official_bp"  # stock basic-pitch predictions

# Tolerances
ONSET_TOL = 0.150        # seconds: onset matching tolerance
OFFSET_RATIO_VAL = 0.5   # mir_eval offset tolerance as fraction of note duration
FS = 100  # piano-roll sampling rate (10 ms hops)
|
| 16 |
+
|
| 17 |
+
def get_frame_accuracy(pm_ref, pm_est, fs=None):
    """Frame-level accuracy (%) between two piano rolls.

    Computes tp / (tp + fp + fn) * 100 over binarized frame/pitch cells —
    i.e. the Jaccard-style frame accuracy used in transcription papers.

    Args:
        pm_ref: reference object exposing get_piano_roll(fs=...).
        pm_est: estimated object exposing get_piano_roll(fs=...).
        fs: piano-roll sampling rate; defaults to the module-level FS
            (parameter added so alternative resolutions can be tested).

    Returns:
        Accuracy percentage, or 0 when both rolls are empty.
    """
    if fs is None:
        fs = FS
    # Binarize both piano rolls.
    pr_ref = (pm_ref.get_piano_roll(fs=fs) > 0).astype(int)
    pr_est = (pm_est.get_piano_roll(fs=fs) > 0).astype(int)

    # Zero-pad the shorter roll so both have the same number of frames.
    max_len = max(pr_ref.shape[1], pr_est.shape[1])
    pr_ref = np.pad(pr_ref, ((0, 0), (0, max_len - pr_ref.shape[1])))
    pr_est = np.pad(pr_est, ((0, 0), (0, max_len - pr_est.shape[1])))

    # TP/FP/FN at frame/pitch cell level.
    tp = np.sum((pr_ref == 1) & (pr_est == 1))
    fp = np.sum((pr_ref == 0) & (pr_est == 1))
    fn = np.sum((pr_ref == 1) & (pr_est == 0))

    return (tp / (tp + fp + fn)) * 100 if (tp + fp + fn) > 0 else 0
|
| 34 |
+
|
| 35 |
+
def get_full_metrics(path_ref, path_est):
    """Score one predicted MIDI against its ground truth.

    Returns:
        (frame_accuracy, F_no_offset, F) — all as percentages.
        On any failure (missing/corrupt file, empty transcription) the
        triple (0, 0, 0) is returned so a batch run is never aborted.
    """
    try:
        pm_ref = pretty_midi.PrettyMIDI(path_ref)
        pm_est = pretty_midi.PrettyMIDI(path_est)

        # 1. Note-level metrics (Fno and F). mir_eval wants intervals in
        # seconds and pitches in Hz.
        ref_int = np.array([[n.start, n.end] for n in pm_ref.instruments[0].notes])
        ref_pit = np.array([pretty_midi.note_number_to_hz(n.pitch) for n in pm_ref.instruments[0].notes])
        est_int = np.array([[n.start, n.end] for n in pm_est.instruments[0].notes])
        est_pit = np.array([pretty_midi.note_number_to_hz(n.pitch) for n in pm_est.instruments[0].notes])

        sc = mir_eval.transcription.evaluate(ref_int, ref_pit, est_int, est_pit,
                                             onset_tolerance=ONSET_TOL, offset_ratio=OFFSET_RATIO_VAL)

        # 2. Frame-level accuracy (Acc).
        acc = get_frame_accuracy(pm_ref, pm_est)

        return acc, sc['F-measure_no_offset'] * 100, sc['F-measure'] * 100
    except Exception as exc:
        # Best-effort, but no longer silent: the previous bare
        # `except Exception: return 0, 0, 0` hid missing predictions
        # as legitimate zero scores.
        print(f"⚠️  No se pudo evaluar '{path_est}': {exc}")
        return 0, 0, 0
|
| 54 |
+
|
| 55 |
+
def main():
    """Score CornetAI vs. stock basic-pitch on the evaluation set; print and export a CSV table."""
    print("--- 📊 EVALUACIÓN FINAL TFG (ESTILO PAPER SPOTIFY) ---")
    res = []
    gts = [f for f in os.listdir(DIR_GT) if f.endswith(".mid")]

    for gt_file in gts:
        name = os.path.splitext(gt_file)[0]
        # Same ground truth scored against each system's predicted MIDI.
        c_acc, c_fno, c_f = get_full_metrics(os.path.join(DIR_GT, gt_file), os.path.join(DIR_PRED_MY, name + ".mid"))
        o_acc, o_fno, o_f = get_full_metrics(os.path.join(DIR_GT, gt_file), os.path.join(DIR_PRED_OFF, name + ".mid"))

        res.append({
            "Archivo": name,
            "CAI_Acc": c_acc, "CAI_Fno": c_fno, "CAI_F": c_f,
            "OFF_Acc": o_acc, "OFF_Fno": o_fno, "OFF_F": o_f
        })

    df = pd.DataFrame(res)
    # Column-wise means across files for the summary table.
    m = df.mean(numeric_only=True)

    print("\n" + "="*50)
    print(f"{'Model':<20} | {'Acc':<8} | {'Fno':<8} | {'F':<8}")
    print("-" * 50)
    print(f"{'CornetAI (V3)':<20} | {m['CAI_Acc']:<8.2f} | {m['CAI_Fno']:<8.2f} | {m['CAI_F']:<8.2f}")
    print(f"{'Basic Pitch':<20} | {m['OFF_Acc']:<8.2f} | {m['OFF_Fno']:<8.2f} | {m['OFF_F']:<8.2f}")
    print("="*50)

    df.to_csv("Tabla_TFG_Estilo_Paper.csv", index=False)
    print("\n Tabla final generada: 'Tabla_TFG_Estilo_Paper.csv'")

if __name__ == "__main__":
    main()
|
src/generadores/generador_maestro_midis.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pretty_midi
|
| 2 |
+
import random
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
# --- CONFIGURATION ---
OUTPUT_FOLDER = "dataset_midis"
CANTIDAD_RITMICOS = 250   # number of staccato/rhythmic files to generate
CANTIDAD_LEGATOS = 250    # number of legato files to generate
ALLOWED_PITCHES = [67, 68, 72, 73, 76]  # cornet scale: G4, Ab4, C5, Db5, E5
|
| 10 |
+
|
| 11 |
+
def get_dynamic_level():
    """Pick a (min_velocity, max_velocity) dynamic band at random.

    Mezzoforte and fortissimo are favoured, but piano still appears so the
    dataset also contains quiet playing.
    """
    piano = (45, 60)
    mezzoforte = (65, 85)
    fortissimo = (90, 127)
    bands = [piano, mezzoforte, fortissimo]
    chosen = random.choices(bands, weights=[0.2, 0.5, 0.3])
    return chosen[0]
|
| 19 |
+
|
| 20 |
+
def get_next_stepwise(current_pitch, pitches=None):
    """Return a neighbouring pitch one scale step above or below *current_pitch*.

    Args:
        current_pitch: a pitch contained in the scale.
        pitches: ordered scale to step within; defaults to the module-level
            ALLOWED_PITCHES (parameter added so other scales can be used).

    Raises:
        ValueError: if current_pitch is not in the scale.
    """
    scale = ALLOWED_PITCHES if pitches is None else pitches
    idx = scale.index(current_pitch)
    moves = []
    if idx > 0: moves.append(idx - 1)
    if idx < len(scale) - 1: moves.append(idx + 1)
    return scale[random.choice(moves)]
|
| 26 |
+
|
| 27 |
+
def generar_ritmico(index):
    """Build a ~15 s MIDI of short, detached (staccato) cornet phrases.

    *index* is currently unused; kept for signature symmetry with the
    generation loop. Pitches are drawn from ALLOWED_PITCHES and each
    phrase shares a single dynamic band.
    """
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(program=0, name="Corneta Ritmica")
    current = 0.5  # short lead-in silence

    while current < 15.0:
        # Pick one dynamic band for the whole phrase.
        min_v, max_v = get_dynamic_level()

        # Phrase of 4-8 notes sharing that dynamic.
        notas_frase = random.randint(4, 8)

        for _ in range(notas_frase):
            pitch = random.choice(ALLOWED_PITCHES)
            dur = random.choice([0.2, 0.4, 0.6])  # fast staccato durations

            # Velocity within the current dynamic band.
            vel = random.randint(min_v, max_v)

            note = pretty_midi.Note(velocity=vel, pitch=pitch, start=current, end=current+dur)
            inst.notes.append(note)

            gap = random.uniform(0.1, 0.3)
            current += dur + gap

            if current > 15.0: break

        # Rest between phrases of different dynamics.
        current += random.uniform(0.5, 1.5)

    pm.instruments.append(inst)
    return pm
|
| 59 |
+
|
| 60 |
+
def generar_legato(index):
    """Build a ~15 s MIDI of long, slurred (legato) cornet phrases.

    *index* is currently unused; kept for signature symmetry with the
    generation loop. Within a phrase, consecutive pitches move stepwise
    and notes overlap slightly to simulate slurring.
    """
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(program=0, name="Corneta Legato")
    current = 0.5  # short lead-in silence

    while current < 14.0:
        num_notas = random.randint(3, 6)
        pitch = random.choice(ALLOWED_PITCHES)

        # One dynamic band for the whole phrase (e.g. a single piano).
        min_v, max_v = get_dynamic_level()

        for n in range(num_notas):
            dur = random.uniform(1.2, 3.0)  # very long notes
            start = current
            end = current + dur + 0.05  # generous overlap for legato

            # Small human-like variation inside the phrase.
            vel = random.randint(min_v, max_v)

            note = pretty_midi.Note(velocity=vel, pitch=pitch, start=start, end=end)
            inst.notes.append(note)

            current += dur
            # Move stepwise for every note except the phrase's last.
            if n < num_notas - 1:
                pitch = get_next_stepwise(pitch)

        # Breathing rest between phrases.
        current += random.uniform(1.5, 2.5)

    pm.instruments.append(inst)
    return pm
|
| 91 |
+
|
| 92 |
+
if __name__ == "__main__":
    if not os.path.exists(OUTPUT_FOLDER): os.makedirs(OUTPUT_FOLDER)

    # Optional prior cleanup (uncomment to delete the old files)
    # for f in os.listdir(OUTPUT_FOLDER): os.remove(os.path.join(OUTPUT_FOLDER, f))

    print(f"--- Generando {CANTIDAD_RITMICOS + CANTIDAD_LEGATOS} archivos con Dinámicas ---")

    # Half the dataset rhythmic/staccato, half legato.
    for i in range(CANTIDAD_RITMICOS):
        midi = generar_ritmico(i)
        midi.write(os.path.join(OUTPUT_FOLDER, f"dinamico_ritmico_{i:03d}.mid"))

    for i in range(CANTIDAD_LEGATOS):
        midi = generar_legato(i)
        midi.write(os.path.join(OUTPUT_FOLDER, f"dinamico_legato_{i:03d}.mid"))

    print("Dataset generado.")
|
src/generadores/generadoraudio.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import subprocess
|
| 3 |
+
import pretty_midi
|
| 4 |
+
|
| 5 |
+
# --- CONFIGURE THIS ---
SOUNDFONT_PATH = r"C:\Users\pampi\Documents\corneta.sf2"  # cornet soundfont for fluidsynth
INPUT_FOLDER = "a"    # folder with source .mid files
OUTPUT_FOLDER = "b"   # folder where rendered .wav files go
TARGET_PROGRAM = 40   # cornet preset in the soundfont (0-127)
# ----------------------
|
| 11 |
+
|
| 12 |
+
def synthesize():
    """Render every MIDI in INPUT_FOLDER to WAV with fluidsynth.

    Each MIDI is first re-targeted to TARGET_PROGRAM (so the soundfont's
    cornet preset is used), written to a temporary file, then synthesized
    at 22050 Hz into OUTPUT_FOLDER. Requires `fluidsynth` on PATH.
    """
    if not os.path.exists(OUTPUT_FOLDER):
        os.makedirs(OUTPUT_FOLDER)

    # Clear stale output. FIX: only remove plain files — the original
    # os.remove() crashed if OUTPUT_FOLDER contained a subdirectory.
    for f in os.listdir(OUTPUT_FOLDER):
        stale = os.path.join(OUTPUT_FOLDER, f)
        if os.path.isfile(stale):
            os.remove(stale)

    files = [f for f in os.listdir(INPUT_FOLDER) if f.endswith('.mid')]
    print(f"Sintetizando {len(files)} archivos...")

    temp_midi = os.path.join(INPUT_FOLDER, "temp.mid")
    try:
        for midi_file in files:
            # Patch the MIDI so every instrument uses the cornet preset.
            midi_path = os.path.join(INPUT_FOLDER, midi_file)
            pm = pretty_midi.PrettyMIDI(midi_path)
            for inst in pm.instruments:
                inst.program = TARGET_PROGRAM

            pm.write(temp_midi)

            # Convert. FIX: slice off the extension instead of
            # str.replace(".mid", ".wav"), which would also rewrite a
            # ".mid" occurring mid-name.
            wav_name = midi_file[:-len(".mid")] + ".wav"
            wav_path = os.path.join(OUTPUT_FOLDER, wav_name)

            cmd = [
                'fluidsynth', '-ni', '-F', wav_path, '-r', '22050', '-g', '1.0',
                SOUNDFONT_PATH, temp_midi
            ]

            subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            print(f"-> {wav_name}")
    finally:
        # FIX: always clean up the temp file, even if fluidsynth fails
        # (previously it was leaked on any exception inside the loop).
        if os.path.exists(temp_midi):
            os.remove(temp_midi)
    print("Paso 2: WAVs generados.")

if __name__ == "__main__":
    synthesize()
|
src/generadores/genmidisreales.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pretty_midi
|
| 2 |
+
import random
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
# --- CONFIGURATION ---
OUTPUT_FOLDER = "dataset_midis_guia"
NUM_FILES = 20
ALLOWED_PITCHES = [67, 68, 72, 73]  # G4, Ab4, C5, Db5
|
| 9 |
+
|
| 10 |
+
def generate_human_guides():
    """Generate NUM_FILES guide MIDIs for a human player to record along with.

    Each file is ~15 s: random pitches from ALLOWED_PITCHES, varied note
    lengths and velocities, with breathing gaps between notes.
    """
    if not os.path.exists(OUTPUT_FOLDER):
        os.makedirs(OUTPUT_FOLDER)

    print(f"Generando {NUM_FILES} partituras guía para grabación...")

    for i in range(NUM_FILES):
        pm = pretty_midi.PrettyMIDI()
        instrument = pretty_midi.Instrument(program=0, name="Corneta Guia")

        # Start at second 2.0 to leave room for the player to get ready.
        current_time = 2.0
        total_duration = 15.0

        while current_time < total_duration:
            # More musical pitch choice from the cornet scale.
            pitch = random.choice(ALLOWED_PITCHES)

            # Mixed note lengths.
            duration_choice = random.choice([0.5, 0.8, 1.2, 2.0])

            # Vary the intensity (dynamics).
            velocity = random.randint(95, 120)

            note = pretty_midi.Note(
                velocity=velocity,
                pitch=pitch,
                start=current_time,
                end=current_time + duration_choice
            )
            instrument.notes.append(note)

            # Breathing time between notes.
            gap = random.uniform(0.3, 0.8)
            current_time += duration_choice + gap

        pm.instruments.append(instrument)
        filename = f"guia_{i:02d}.mid"
        pm.write(os.path.join(OUTPUT_FOLDER, filename))
        # BUG FIX: previously printed the literal "-> (unknown)" — the
        # f-string had lost its placeholder; report the file actually written.
        print(f"-> {filename}")

    print("¡Listos! Ahora conviértelos a WAV y úsalos de backing track.")

if __name__ == "__main__":
    generate_human_guides()
|