pachet committed on
Commit
99a4b1a
·
1 Parent(s): 700130e

Add files to project

Browse files
Files changed (4) hide show
  1. app.py +85 -14
  2. apt.txt +3 -0
  3. hexachords.py +126 -0
  4. requirements.txt +6 -1
app.py CHANGED
@@ -3,43 +3,111 @@ import mido
3
  from mido import Message, MidiFile, MidiTrack
4
  import numpy as np
5
  import os
 
 
6
 
 
 
 
7
 
8
- def generate_chords(hexachord):
9
  # Placeholder for your actual chord generation function
10
  # Assuming hexachord is a list of MIDI note numbers
11
- chords = []
12
- for i in range(4): # Generate 4 chords
13
- chords.append([note + (i * 2) for note in hexachord]) # Simple transposition
14
- return chords
15
-
16
 
17
  def create_midi(chords):
18
  mid = MidiFile()
19
  track = MidiTrack()
20
  mid.tracks.append(track)
21
-
22
  for chord in chords:
23
  for note in chord:
24
- track.append(Message('note_on', note=int(note), velocity=64, time=0))
25
  for note in chord:
26
- track.append(Message('note_off', note=int(note), velocity=64, time=480))
27
-
28
  midi_path = "output.mid"
29
  mid.save(midi_path)
30
  return midi_path
31
 
32
 
33
- def process_hexachord(note1, note2, note3, note4, note5, note6):
34
- notes = [note1, note2, note3, note4, note5, note6]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  notes = [int(note) for note in notes if note is not None] # Convert to int, remove None values
36
 
37
  if len(notes) != 6 or len(set(notes)) != 6:
38
  return "Please select exactly 6 unique notes."
39
 
 
40
  chords = generate_chords(notes)
 
41
  midi_path = create_midi(chords)
42
- return midi_path
 
 
 
 
 
 
 
43
 
44
 
45
  # UI Components
@@ -55,11 +123,14 @@ with gr.Blocks() as ui:
55
 
56
  generate_button = gr.Button("Generate Chords")
57
  midi_output = gr.File(label="Generated MIDI")
 
 
 
58
 
59
  generate_button.click(
60
  fn=process_hexachord,
61
  inputs=note_inputs,
62
- outputs=[midi_output]
63
  )
64
 
65
  # Detect if running on Hugging Face Spaces
 
3
  from mido import Message, MidiFile, MidiTrack
4
  import numpy as np
5
  import os
6
+ import matplotlib.pyplot as plt
7
+ import subprocess
8
 
9
+ from music21 import midi, converter
10
+ from pydub import AudioSegment
11
+ import hexachords
12
 
13
def generate_chords(midi_pitches):
    """Turn a list of MIDI pitch numbers into a chord sequence.

    Delegates to hexachords.Hexachord, requesting chords stacked in
    perfect fourths ("P4").
    """
    return hexachords.Hexachord().generate_chord_sequence_from_midi_pitches(
        midi_pitches, intrvl="P4")
 
 
19
 
20
def create_midi(chords):
    """Write a chord progression to "output.mid" and return the path.

    Args:
        chords: iterable of chords; each chord is an iterable of
            music21 notes (only ``note.pitch.midi`` is read).

    Returns:
        The path of the saved MIDI file ("output.mid").

    Fix: previously every note_off carried ``time=480``, so the notes of
    a chord were released 480 ticks apart (MIDI times are deltas from the
    previous message). Now only the first note_off carries the delta, so
    each chord starts and stops as a unit.
    """
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)

    for chord in chords:
        for note in chord:
            track.append(Message('note_on', note=note.pitch.midi, velocity=64, time=0))
        for i, note in enumerate(chord):
            # 480-tick delta on the first off only; the rest fire together.
            track.append(Message('note_off', note=note.pitch.midi, velocity=0,
                                 time=480 if i == 0 else 0))

    midi_path = "output.mid"
    mid.save(midi_path)
    return midi_path
33
 
34
 
35
def generate_piano_roll(chords):
    """Render a simple piano-roll image of *chords* to "piano_roll.png".

    Each chord occupies one unit on the x axis; each note is drawn as a
    horizontal bar centred on its MIDI pitch.

    Args:
        chords: iterable of chords; each chord is an iterable of
            music21 notes (only ``note.pitch.midi`` is read).

    Returns:
        The path of the saved PNG ("piano_roll.png").
    """
    fig, ax = plt.subplots(figsize=(8, 4))

    for i, chord in enumerate(chords):
        for note in chord:
            ax.broken_barh([(i * 1, 0.8)], (note.pitch.midi - 0.4, 0.8), facecolors='blue')

    ax.set_xlabel("Chord Progression")
    ax.set_ylabel("MIDI Note Number")

    # y ticks span exactly the pitch range actually used (the original
    # computed this with a hand-rolled min/max loop).
    pitches = [note.pitch.midi for ch in chords for note in ch]
    lo, hi = (min(pitches), max(pitches)) if pitches else (128, 0)
    ax.set_yticks(range(lo, hi + 1, 2))
    ax.set_xticks(range(len(chords)))
    ax.set_xticklabels([f"Chord {i + 1}" for i in range(len(chords))])

    plt.grid(True, linestyle='--', alpha=0.5)
    plt.savefig("piano_roll.png")
    plt.close(fig)  # free the figure — repeated Gradio calls would otherwise leak
    return "piano_roll.png"
59
+
60
def generate_music_score(midi_path):
    """Engrave the MIDI file as a score image via music21's LilyPond backend.

    Args:
        midi_path: path to a standard MIDI file.

    Returns:
        The path of the rendered image as reported by music21.

    Fix: ``score.write`` appends its own extension to the supplied ``fp``
    and returns the real output path; the original returned the bare
    string "music_score", which points at a file that does not exist.
    The dead MidiFile open/read/close preamble (its result was unused —
    ``converter.parse`` re-reads the file anyway) is removed.
    """
    score = converter.parse(midi_path)
    return str(score.write(fmt='lily.png', fp="music_score"))
70
+
71
def convert_midi_to_audio(midi_path):
    """Render *midi_path* to MP3 (via an intermediate WAV).

    Synthesis is done by the fluidsynth CLI with a local SoundFont;
    pydub/ffmpeg handles the WAV -> MP3 conversion. Returns the MP3 path
    on success, or a human-readable error string (never raises) when the
    SoundFont is missing or any conversion step fails.
    """
    soundfont_path = "soundfont.sf2"  # Ensure you have a SoundFont file
    wav_path, mp3_path = "output.wav", "output.mp3"

    if not os.path.exists(soundfont_path):
        return "Error: SoundFont file not found. Please provide a valid .sf2 file."

    try:
        synth_cmd = [
            "fluidsynth", "-ni", soundfont_path, midi_path,
            "-F", wav_path, "-r", "44100",
        ]
        subprocess.run(synth_cmd, check=True)

        # Convert WAV to MP3
        AudioSegment.converter = "ffmpeg"
        AudioSegment.from_wav(wav_path).export(mp3_path, format="mp3")
        return mp3_path
    except Exception as e:
        return f"Error converting MIDI to audio: {str(e)}"
89
+
90
+
91
def process_hexachord(*notes):
    """Gradio click handler: validate six unique notes, then build all artifacts.

    Args:
        *notes: raw values from the six note dropdowns (entries may be None).

    Returns:
        (midi_path, piano_roll_path, score_path, audio_path), or four
        Nones when validation fails. Fix: the handler feeds FOUR output
        components, but the original returned a single string on invalid
        input, breaking the output arity; the message is now printed and
        the arity kept.
    """
    print(f'Received notes: {notes}')
    notes = [int(note) for note in notes if note is not None]  # Convert to int, remove None values

    if len(notes) != 6 or len(set(notes)) != 6:
        print("Please select exactly 6 unique notes.")
        return None, None, None, None

    print('Generating chords...')
    chords = generate_chords(notes)
    print('Creating MIDI file...')
    midi_path = create_midi(chords)
    print('Generating piano roll...')
    piano_roll_path = generate_piano_roll(chords)
    print('Generating music score...')
    score_path = generate_music_score(midi_path)
    print('Converting MIDI to audio...')
    audio_path = convert_midi_to_audio(midi_path)

    return midi_path, piano_roll_path, score_path, audio_path
111
 
112
 
113
  # UI Components
 
123
 
124
  generate_button = gr.Button("Generate Chords")
125
  midi_output = gr.File(label="Generated MIDI")
126
+ piano_roll_output = gr.Image(label="Piano Roll Visualization")
127
+ music_score_output = gr.Image(label="Score Visualization")
128
+ audio_output = gr.Audio(label="Play Generated Chords", value=None, interactive=False)
129
 
130
  generate_button.click(
131
  fn=process_hexachord,
132
  inputs=note_inputs,
133
+ outputs=[midi_output, piano_roll_output, music_score_output, audio_output]
134
  )
135
 
136
  # Detect if running on Hugging Face Spaces
apt.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ fluidsynth
2
+ ffmpeg
3
+ lilypond
hexachords.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from music21 import note, stream, interval, meter, chord
2
+ from ortools.sat.python import cp_model
3
+
4
class Hexachord:
    """Build and manipulate six-note chord sequences derived from a hexachord."""

    def generate_chord_sequence_from_midi_pitches(self, list_of_mp, intrvl="P5"):
        """Like generate_chord_sequence, but takes MIDI pitch numbers.

        Fix: ``intrvl`` is now forwarded — it used to be accepted but
        silently dropped, so callers requesting "P4" got the "P5" default.
        """
        return self.generate_chord_sequence(
            [note.Note(mp).nameWithOctave for mp in list_of_mp], intrvl=intrvl)

    def generate_chord_sequence(self, list_of_notes, intrvl="P5"):
        """Build one six-note chord per hexachord note by stacking `intrvl` steps.

        Starting from each note, repeatedly transpose by ``intrvl``; keep
        pitches whose pitch class belongs to the hexachord, folding each
        kept pitch down by octaves until it lies within an octave of the
        chord's current top note.

        Args:
            list_of_notes: note names with octave, e.g. ["C3", "D3", ...].
            intrvl: music21 interval name used to stack chord tones.

        Returns:
            list of music21 ``chord.Chord``, one per input note.

        NOTE(review): if some hexachord pitch classes are unreachable from
        a start note via repeated ``intrvl`` steps, the ``while len(ch) < 6``
        loop does not terminate — confirm intended inputs.
        """
        hexachord = [note.Note(n) for n in list_of_notes]
        step = interval.Interval(intrvl)
        all_pc = [n.pitch.pitchClass for n in hexachord]
        all_chords = []
        for n in hexachord:
            ch = chord.Chord([n])
            current_note = n
            while len(ch) < 6:
                current_note = step.transposeNote(current_note)
                if current_note.pitch.pitchClass in all_pc and current_note not in ch:
                    # Fold the candidate down so it sits no more than an
                    # octave above the chord's current highest note.
                    while interval.Interval(noteStart=ch[-1], noteEnd=current_note).semitones > 12:
                        current_note = current_note.transpose(-12)
                    ch.add(current_note)
            all_chords.append(ch)
        return all_chords

    def chords_to_stream(self, chords, file_name):
        """Render chords as one whole-note chord per 4/4 bar; write MIDI.

        Returns the music21 stream (also written to ``file_name``).
        """
        s = stream.Stream()
        s.append(meter.TimeSignature("4/4"))
        for c in chords:
            ch = chord.Chord(c)
            ch.duration.quarterLength = 4  # one chord per bar
            s.append(ch)
        s.write('midi', file_name)
        return s

    def alternate_chords(self, s1, s2):
        """Create a new stream alternating between chords from s1 and s2.

        Pairs are taken with ``zip``, so trailing chords of the longer
        stream are dropped.
        """
        new_stream = stream.Stream()
        chords1 = list(s1.getElementsByClass(chord.Chord))
        chords2 = list(s2.getElementsByClass(chord.Chord))
        for c1, c2 in zip(chords1, chords2):
            new_stream.append(c1)
            new_stream.append(c2)
        return new_stream

    def optimize_voice_leading(self, chord_sequence):
        """Re-octave chord tones with CP-SAT to smooth the voice leading.

        Each (chord, note) pair gets an integer variable for its octave,
        allowed to shift by +/-1 from the original.

        NOTE(review): the constraints tying max_octave/min_octave to the
        note variables are commented out, so the "spread" objective below
        is effectively unconstrained, and the movement variables are built
        but never minimized — confirm the intended objective before
        relying on the result.

        NOTE(review): variable names key on ``nameWithOctave``; a chord
        containing the same spelled pitch twice would collide.
        """
        model = cp_model.CpModel()
        octave_variables = {}
        movement_vars = []

        # One octave variable per (chord, note), allowing octave shifts.
        for i, ch in enumerate(chord_sequence):
            for n in ch.notes:
                var_name = f"chord_{i}_note_{n.nameWithOctave}"
                octave_variables[var_name] = model.NewIntVar(n.octave - 1, n.octave + 1, var_name)

        spread_vars = []
        # Per-chord octave spread (max - min) — see review note above.
        for i in range(len(chord_sequence) - 1):
            max_octave = model.NewIntVar(0, 10, "max_pitch" + str(i))
            min_octave = model.NewIntVar(0, 10, "min_pitch" + str(i))
            for n in chord_sequence[i]:
                v = octave_variables[f"chord_{i}_note_{n.nameWithOctave}"]
                # model.Add(max_octave >= v)  # max must be at least as high as any note
                # model.Add(min_octave <= v)
            spread_vars.append(max_octave - min_octave)

        # Absolute octave movement between consecutive chords (collected
        # for a future objective; not currently minimized).
        for i in range(len(chord_sequence) - 1):
            for n1, n2 in zip(chord_sequence[i].notes, chord_sequence[i + 1].notes):
                var1 = octave_variables[f"chord_{i}_note_{n1.nameWithOctave}"]
                var2 = octave_variables[f"chord_{i+1}_note_{n2.nameWithOctave}"]
                movement_var = model.NewIntVar(0, 36, f"movement_{i}_{n1.name}")
                model.AddAbsEquality(movement_var, var2 - var1)
                movement_vars.append(movement_var)

        # Objective: minimize total per-chord spread.
        model.Minimize(sum(spread_vars))

        solver = cp_model.CpSolver()
        solver.Solve(model)

        # Rebuild the chords with the solved octaves.
        optimized_chords = []
        for i, ch in enumerate(chord_sequence):
            new_chord = chord.Chord(
                [note.Note(f"{n.name}{solver.Value(octave_variables[f'chord_{i}_note_{n.nameWithOctave}'])}")
                 for n in ch.notes])
            optimized_chords.append(new_chord)

        return optimized_chords
111
+
112
+
113
if __name__ == '__main__':
    # Manual demo: build a chord sequence from a hexachord in fourths,
    # dump it as text + MIDI, then show the CP-SAT voice-leading result.
    hexa = Hexachord()
    chord_seq = hexa.generate_chord_sequence(
        ["C3", "D3", "E3", "G3", "A3", "B3"], intrvl="P4")
    hexa.chords_to_stream(chord_seq, 'temp.mid').show('text')
    optimized = hexa.optimize_voice_leading(chord_seq)
    stream.Stream(optimized).show('text')
requirements.txt CHANGED
@@ -1,3 +1,8 @@
1
  gradio
2
  mido
3
- numpy
 
 
 
 
 
 
1
  gradio
2
  mido
3
+ numpy
4
+ matplotlib
5
+ pydub
6
+ # NOTE: the ffmpeg binary is provided via apt.txt; the PyPI package named "ffmpeg" is unrelated and should not be pip-installed
7
+ music21
8
+ ortools