hjimjim
committed on
Commit
·
b672e6e
1
Parent(s):
b78b541
upload model
Browse files- app.py +13 -11
- requirements.txt +0 -1
app.py
CHANGED
|
@@ -2,11 +2,14 @@ import streamlit as st
|
|
| 2 |
import torch
|
| 3 |
import numpy as np
|
| 4 |
import matplotlib.pyplot as plt
|
| 5 |
-
|
| 6 |
import pretty_midi as pm
|
|
|
|
| 7 |
from VAE import VAE
|
| 8 |
-
|
| 9 |
import pretty_midi as pm
|
|
|
|
|
|
|
| 10 |
|
| 11 |
|
| 12 |
# Define device
|
|
@@ -70,25 +73,23 @@ def reconstruct(right, left, model):
|
|
| 70 |
input_tensor = torch.cat([right_tensor, left_tensor], dim=0)
|
| 71 |
input_tensor = input_tensor.unsqueeze(0)
|
| 72 |
|
| 73 |
-
|
| 74 |
with torch.no_grad():
|
| 75 |
recon_data, _, _, _ = model(input_tensor)
|
| 76 |
|
| 77 |
return recon_data.squeeze(0).cpu().numpy()
|
| 78 |
|
| 79 |
|
|
|
|
|
|
|
|
|
|
| 80 |
|
| 81 |
-
|
| 82 |
-
fs = FluidSynth(sound_font_path)
|
| 83 |
-
fs.midi_to_audio(midi_file, wav_file)
|
| 84 |
|
| 85 |
-
|
| 86 |
-
louder_audio = audio + volume_increase_db
|
| 87 |
-
|
| 88 |
-
louder_audio.export(wav_file, format="wav")
|
| 89 |
-
|
| 90 |
return wav_file
|
| 91 |
|
|
|
|
| 92 |
# Create a MIDI stream from piano roll data
|
| 93 |
def create_midi_from_piano_roll(right_hand, left_hand, fs=8):
|
| 94 |
pm_obj = pm.PrettyMIDI()
|
|
@@ -142,6 +143,7 @@ def convert_to_midi(right_hand, left_hand, file_name="output.mid", fs=8):
|
|
| 142 |
# Streamlit interface
|
| 143 |
st.title("GRU-VAE Reconstruction Demo")
|
| 144 |
model = load_model()
|
|
|
|
| 145 |
|
| 146 |
# File upload
|
| 147 |
uploaded_file = st.file_uploader("Upload a MIDI file", type=["mid", "midi"])
|
|
|
|
| 2 |
import torch
|
| 3 |
import numpy as np
|
| 4 |
import matplotlib.pyplot as plt
|
| 5 |
+
|
| 6 |
import pretty_midi as pm
|
| 7 |
+
|
| 8 |
from VAE import VAE
|
| 9 |
+
|
| 10 |
import pretty_midi as pm
|
| 11 |
+
from scipy.io.wavfile import write
|
| 12 |
+
|
| 13 |
|
| 14 |
|
| 15 |
# Define device
|
|
|
|
| 73 |
input_tensor = torch.cat([right_tensor, left_tensor], dim=0)
|
| 74 |
input_tensor = input_tensor.unsqueeze(0)
|
| 75 |
|
| 76 |
+
|
| 77 |
with torch.no_grad():
|
| 78 |
recon_data, _, _, _ = model(input_tensor)
|
| 79 |
|
| 80 |
return recon_data.squeeze(0).cpu().numpy()
|
| 81 |
|
| 82 |
|
| 83 |
+
def midi_to_wav(midi_file, wav_file="output.wav", volume_increase_db=17):
    """Render a MIDI file to a 16-bit PCM WAV file via pretty_midi synthesis.

    Parameters
    ----------
    midi_file : str or file-like
        Path to (or stream of) the MIDI file; parsed by ``pm.PrettyMIDI``.
    wav_file : str
        Destination WAV path (default ``"output.wav"``).
    volume_increase_db : int
        Legacy parameter from the old pydub/FluidSynth pipeline. The audio is
        now peak-normalized to 90% of int16 full scale, so this value is
        currently unused. Kept so existing callers keep working.
        TODO(review): either honor or remove it once callers are confirmed.

    Returns
    -------
    str
        The path the WAV data was written to (same as ``wav_file``).
    """
    midi_data = pm.PrettyMIDI(midi_file)
    # Synthesize a float waveform at 44.1 kHz (CD-quality sample rate).
    audio_data = midi_data.synthesize(fs=44100)

    # Peak-normalize to 90% of int16 full scale before quantizing.
    # Guard against an all-silent rendering (e.g. an empty MIDI), which
    # would otherwise divide by zero and fill the buffer with NaN noise.
    peak = np.max(np.abs(audio_data)) if audio_data.size else 0.0
    if peak > 0:
        audio_data = audio_data / peak * 32767 * 0.9
    audio_data = np.int16(audio_data)

    write(wav_file, 44100, audio_data)
    return wav_file
|
| 91 |
|
| 92 |
+
|
| 93 |
# Create a MIDI stream from piano roll data
|
| 94 |
def create_midi_from_piano_roll(right_hand, left_hand, fs=8):
|
| 95 |
pm_obj = pm.PrettyMIDI()
|
|
|
|
| 143 |
# Streamlit interface
|
| 144 |
st.title("GRU-VAE Reconstruction Demo")
|
| 145 |
model = load_model()
|
| 146 |
+
|
| 147 |
|
| 148 |
# File upload
|
| 149 |
uploaded_file = st.file_uploader("Upload a MIDI file", type=["mid", "midi"])
|
requirements.txt
CHANGED
|
@@ -1,6 +1,5 @@
|
|
| 1 |
streamlit==1.16.0
|
| 2 |
torch==1.11.0
|
| 3 |
pretty_midi
|
| 4 |
-
midi2audio
|
| 5 |
scipy
|
| 6 |
pydub
|
|
|
|
| 1 |
streamlit==1.16.0
|
| 2 |
torch==1.11.0
|
| 3 |
pretty_midi
|
|
|
|
| 4 |
scipy
|
| 5 |
pydub
|