Miguel Jaramillo committed
Add files via upload
app.py
CHANGED
@@ -1,58 +1,17 @@
 # -*- coding: utf-8 -*-
-"""
 
 Automatically generated by Colaboratory.
 
 Original file is located at
-    https://colab.research.google.com/
-
-##### Copyright 2020 The TensorFlow Hub Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-"""
-
-#@title Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""<table class="tfo-notebook-buttons" align="left">
-  <td>
-    <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/spice"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
-  </td>
-  <td>
-    <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
-  </td>
-  <td>
-    <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
-  </td>
-  <td>
-    <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/spice.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
-  </td>
-  <td>
-    <a href="https://tfhub.dev/google/spice/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a>
-  </td>
-</table>
-
-# Pitch Detection with SPICE
-
-This colab will show you how to use the SPICE model downloaded from TensorFlow Hub.
 """
 
 !sudo apt-get install -q -y timidity libsndfile1
-
-# All the imports to deal with sound data
 !pip install pydub numba==0.48 librosa music21
 
 import tensorflow as tf
 import tensorflow_hub as hub
@@ -74,152 +33,14 @@ from base64 import b64decode
 
 import music21
 from pydub import AudioSegment
-
-logger.setLevel(logging.ERROR)
-
-print("tensorflow: %s" % tf.__version__)
-#print("librosa: %s" % librosa.__version__)
-
-"""# The audio input file
-
-Now the hardest part: Record your singing! :)
-
-We provide four methods to obtain an audio file:
-
-1. Record audio directly in colab
-2. Upload from your computer
-3. Use a file saved on Google Drive
-4. Download the file from the web
-
-Choose one of the four methods below.
-"""
-
-#@title [Run this] Definition of the JS code to record audio straight from the browser
-
-RECORD = """
-const sleep = time => new Promise(resolve => setTimeout(resolve, time))
-const b2text = blob => new Promise(resolve => {
-  const reader = new FileReader()
-  reader.onloadend = e => resolve(e.srcElement.result)
-  reader.readAsDataURL(blob)
-})
-var record = time => new Promise(async resolve => {
-  stream = await navigator.mediaDevices.getUserMedia({ audio: true })
-  recorder = new MediaRecorder(stream)
-  chunks = []
-  recorder.ondataavailable = e => chunks.push(e.data)
-  recorder.start()
-  await sleep(time)
-  recorder.onstop = async () => {
-    blob = new Blob(chunks)
-    text = await b2text(blob)
-    resolve(text)
-  }
-  recorder.stop()
-})
-"""
-
-def record(sec=5):
-  try:
-    from google.colab import output
-  except ImportError:
-    print('Not possible to import output from google.colab')
-    return ''
-  else:
-    print('Recording')
-    display(Javascript(RECORD))
-    s = output.eval_js('record(%d)' % (sec * 1000))
-    fname = 'recorded_audio.wav'
-    print('Saving to', fname)
-    b = b64decode(s.split(',')[1])
-    with open(fname, 'wb') as f:
-      f.write(b)
-    return fname
-
-#@title Select how to input your audio { run: "auto" }
-INPUT_SOURCE = 'https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav' #@param ["https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav", "RECORD", "UPLOAD", "./drive/My Drive/YOUR_MUSIC_FILE.wav"] {allow-input: true}
-
-print('You selected', INPUT_SOURCE)
-
-if INPUT_SOURCE == 'RECORD':
-  uploaded_file_name = record(5)
-elif INPUT_SOURCE == 'UPLOAD':
-  try:
-    from google.colab import files
-  except ImportError:
-    print("ImportError: files from google.colab seems to not be available")
-  else:
-    uploaded = files.upload()
-    for fn in uploaded.keys():
-      print('User uploaded file "{name}" with length {length} bytes'.format(
-          name=fn, length=len(uploaded[fn])))
-    uploaded_file_name = next(iter(uploaded))
-    print('Uploaded file: ' + uploaded_file_name)
-elif INPUT_SOURCE.startswith('./drive/'):
-  try:
-    from google.colab import drive
-  except ImportError:
-    print("ImportError: drive from google.colab seems to not be available")
-  else:
-    drive.mount('/content/drive')
-    # Don't forget to change the name of the file you
-    # will use here!
-    gdrive_audio_file = 'YOUR_MUSIC_FILE.wav'
-    uploaded_file_name = INPUT_SOURCE
-elif INPUT_SOURCE.startswith('http'):
-  !wget --no-check-certificate 'https://storage.googleapis.com/download.tensorflow.org/data/c-scale-metronome.wav' -O c-scale.wav
-  uploaded_file_name = 'c-scale.wav'
-else:
-  print('Unrecognized input format!')
-  print('Please select "RECORD", "UPLOAD", or specify a file hosted on Google Drive or a file from the web to download')
-
-"""# Preparing the audio data
-
-Now we have the audio, let's convert it to the expected format and then listen to it!
-
-The SPICE model needs as input an audio file at a sampling rate of 16kHz and with only one channel (mono).
-
-To help you with this part, we created a function (`convert_audio_for_model`) to convert any wav file you have to the model's expected format:
-"""
-
-# Function that converts the user-created audio to the format that the model
-# expects: bitrate 16kHz and only one channel (mono).
 
 EXPECTED_SAMPLE_RATE = 16000
-
-def convert_audio_for_model(user_file, output_file='converted_audio_file.wav'):
-  audio = AudioSegment.from_file(user_file)
-  audio = audio.set_frame_rate(EXPECTED_SAMPLE_RATE).set_channels(1)
-  audio.export(output_file, format="wav")
-  return output_file
-
-# Converting to the expected format for the model
-# In all the 4 input methods above, the uploaded file name is in
-# the variable uploaded_file_name
-converted_audio_file = convert_audio_for_model(uploaded_file_name)
-
-# Loading audio samples from the wav file:
-sample_rate, audio_samples = wavfile.read(converted_audio_file, 'rb')
-
-# Show some basic information about the audio.
-duration = len(audio_samples) / sample_rate
-print(f'Sample rate: {sample_rate} Hz')
-print(f'Total duration: {duration:.2f}s')
-print(f'Size of the input: {len(audio_samples)}')
-
-# Let's listen to the wav file.
-Audio(audio_samples, rate=sample_rate)
-
-"""First thing, let's take a look at the waveform of our singing."""
-
-# We can visualize the audio as a waveform.
-_ = plt.plot(audio_samples)
-
-"""A more informative visualization is the [spectrogram](https://en.wikipedia.org/wiki/Spectrogram), which shows the frequencies present over time.
-
-Here, we use a logarithmic frequency scale to make the singing more clearly visible.
-"""
 
 MAX_ABS_INT16 = 32768.0
 
@@ -235,69 +56,51 @@ def plot_stft(x, sample_rate, show_black_and_white=False):
 
     librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)
 
   plt.colorbar(format='%+2.0f dB')
 
-
-""
-
-model = hub.load("https://tfhub.dev/google/spice/2")
-model_output = model.signatures["serving_default"](tf.constant(audio_samples, tf.float32))
-
-pitch_outputs = model_output["pitch"]
-uncertainty_outputs = model_output["uncertainty"]
-
-# 'Uncertainty' basically means the inverse of confidence.
-confidence_outputs = 1.0 - uncertainty_outputs
-
-"""
-
-"""
-confidence_outputs = list(confidence_outputs)
-pitch_outputs = [float(x) for x in pitch_outputs]
-
-indices = range(len(pitch_outputs))
-confident_pitch_outputs = [(i, p)
-    for i, p, c in zip(indices, pitch_outputs, confidence_outputs) if c >= 0.9]
-confident_pitch_outputs_x, confident_pitch_outputs_y = zip(*confident_pitch_outputs)
-
-fig, ax = plt.subplots()
-fig.set_size_inches(20, 10)
-ax.set_ylim([0, 1])
-plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y)
-plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y, c="r")
-
-plt.show()
-
-"""The pitch values returned by SPICE are in the range from 0 to 1. Let's convert them to absolute pitch values in Hz."""
 
 def output2hz(pitch_output):
   # Constants taken from https://tfhub.dev/google/spice/2
@@ -307,65 +110,6 @@ def output2hz(pitch_output):
 
   BINS_PER_OCTAVE = 12.0;
   cqt_bin = pitch_output * PT_SLOPE + PT_OFFSET;
   return FMIN * 2.0 ** (1.0 * cqt_bin / BINS_PER_OCTAVE)
-
-confident_pitch_values_hz = [output2hz(p) for p in confident_pitch_outputs_y]
-
-"""Now, let's see how good the prediction is: we will overlay the predicted pitches on the original spectrogram. To make the pitch predictions more visible, we change the spectrogram to black and white."""
-
-plot_stft(audio_samples / MAX_ABS_INT16,
-          sample_rate=EXPECTED_SAMPLE_RATE, show_black_and_white=True)
-# Note: conveniently, since the plot is in log scale, the pitch outputs
-# also get converted to the log scale automatically by matplotlib.
-plt.scatter(confident_pitch_outputs_x, confident_pitch_values_hz, c="r")
-
-plt.show()
-
-"""# Converting to musical notes
-
-Now that we have the pitch values, let's convert them to notes!
-This part is challenging by itself. We have to take into account two things:
-1. the rests (when there's no singing)
-2. the size of each note (offsets)
-
-### 1: Adding zeros to the output to indicate when there's no singing
-"""
-
-pitch_outputs_and_rests = [
-    output2hz(p) if c >= 0.9 else 0
-    for i, p, c in zip(indices, pitch_outputs, confidence_outputs)
-]
-
-"""### 2: Adding note offsets
-
-When a person sings freely, the melody may have an offset to the absolute pitch values that notes can represent.
-Hence, to convert predictions to notes, one needs to correct for this possible offset.
-This is what the following code computes.
-"""
-
-A4 = 440
-C0 = A4 * pow(2, -4.75)
-note_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
-
-def hz2offset(freq):
-  # This measures the quantization error for a single note.
-  if freq == 0:  # Rests always have zero error.
-    return None
-  # Quantized note.
-  h = round(12 * math.log2(freq / C0))
-  return 12 * math.log2(freq / C0) - h
-
-# The ideal offset is the mean quantization error for all the notes
-# (excluding rests):
-offsets = [hz2offset(p) for p in pitch_outputs_and_rests if p != 0]
-print("offsets: ", offsets)
-
-ideal_offset = statistics.mean(offsets)
-print("ideal offset: ", ideal_offset)
-
-"""We can now use some heuristics to try and estimate the most likely sequence of notes that were sung.
-The ideal offset computed above is one ingredient, but we also need to know the speed (how many predictions make, say, an eighth?) and the time offset to start quantizing. To keep it simple, we'll just try different speeds and time offsets, measure the quantization error, and in the end use the values that minimize this error.
-"""
 
 def quantize_predictions(group, ideal_offset):
   # Group values are either 0, or a pitch in Hz.
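
To make the offset correction above concrete, here is a small worked example of the quantization error that `hz2offset` measures (a sketch; 270.9 Hz is just an illustrative prediction, not data from this commit):

import math

A4 = 440
C0 = A4 * pow(2, -4.75)                 # ~16.35 Hz

semitones = 12 * math.log2(270.9 / C0)  # ~48.60 semitones above C0
h = round(semitones)                    # 49 -> C#4
print(semitones - h)                    # ~-0.40: about 40% of a semitone flat

Averaging this error over all voiced predictions (rests excluded) is exactly what produces `ideal_offset`, which the search in the next hunk then corrects for.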
@@ -415,123 +159,161 @@ def get_quantization_and_error(pitch_outputs_and_rests, predictions_per_eighth,
 
   return quantization_error, notes_and_rests
 
-best_error = float("inf")
-best_notes_and_rests = None
-best_predictions_per_note = None
-
-for predictions_per_note in range(20, 65, 1):
-  for prediction_start_offset in range(predictions_per_note):
-
-    error, notes_and_rests = get_quantization_and_error(
-        pitch_outputs_and_rests, predictions_per_note,
-        prediction_start_offset, ideal_offset)
-
-    if error < best_error:
-      best_error = error
-      best_notes_and_rests = notes_and_rests
-      best_predictions_per_note = predictions_per_note
-
-# At this point, best_notes_and_rests contains the best quantization.
-# Since we don't need to have rests at the beginning, let's remove these:
-while best_notes_and_rests[0] == 'Rest':
-  best_notes_and_rests = best_notes_and_rests[1:]
-# Also remove silence at the end.
-while best_notes_and_rests[-1] == 'Rest':
-  best_notes_and_rests = best_notes_and_rests[:-1]
-
-""
-
-sc = music21.stream.Score()
-# Adjust the speed to match the actual singing.
-bpm = 60 * 60 / best_predictions_per_note
-print('bpm: ', bpm)
-a = music21.tempo.MetronomeMark(number=bpm)
-sc.insert(0, a)
-
-for snote in best_notes_and_rests:
-  d = 'half'
-  if snote == 'Rest':
-    sc.append(music21.note.Rest(type=d))
-  else:
-    sc.append(music21.note.Note(snote, type=d))
-
-""
-""

app.py (new version):

 # -*- coding: utf-8 -*-
+"""app.ipynb
 
 Automatically generated by Colaboratory.
 
 Original file is located at
+    https://colab.research.google.com/drive/1jcyaKIPrnAlKT1SCk8TlzLNezLvRcM1s
 """
 
 !sudo apt-get install -q -y timidity libsndfile1
 !pip install pydub numba==0.48 librosa music21
+!pip install -q gradio
+
+import gradio as gr
 
 import tensorflow as tf
 import tensorflow_hub as hub
...
 
 import music21
 from pydub import AudioSegment
+from IPython.core.display import display, HTML, Javascript
+import json, random
 
 EXPECTED_SAMPLE_RATE = 16000
+MAX_ABS_INT16 = 32768.0
+A4 = 440
+C0 = A4 * pow(2, -4.75)
+note_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
 
 MAX_ABS_INT16 = 32768.0
 
...
     librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)
 
   plt.colorbar(format='%+2.0f dB')
+  return fig
 
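
The constants just added (`A4`, `C0`, `note_names`) encode twelve-tone equal temperament with C0 as the reference. For illustration, a minimal sketch that names an arbitrary frequency with them (the helper `freq_to_name` is hypothetical, not part of the commit):

import math

def freq_to_name(freq):
    # Semitones above C0, rounded to the nearest equal-tempered note.
    h = round(12 * math.log2(freq / C0))
    octave, n = divmod(h, 12)
    return note_names[n] + str(octave)

print(freq_to_name(440))  # 'A4'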
+def showScore(score):
+  xml = open(score.write('musicxml')).read()
+  showMusicXML(xml)
+
+def showMusicXML(xml):
+  DIV_ID = "OSMD_div"
+  display(HTML('<div id="'+DIV_ID+'">loading OpenSheetMusicDisplay</div>'))
+  script = """
+  var div_id = {{DIV_ID}};
+  function loadOSMD() {
+    return new Promise(function(resolve, reject){
+      if (window.opensheetmusicdisplay) {
+        return resolve(window.opensheetmusicdisplay)
+      }
+      // OSMD script has a 'define' call which conflicts with requirejs
+      var _define = window.define // save the define object
+      window.define = undefined // now the loaded script will ignore requirejs
+      var s = document.createElement( 'script' );
+      s.setAttribute( 'src', "https://cdn.jsdelivr.net/npm/opensheetmusicdisplay@0.7.6/build/opensheetmusicdisplay.min.js" );
+      //s.setAttribute( 'src', "/custom/opensheetmusicdisplay.js" );
+      s.onload = function(){
+        window.define = _define
+        resolve(opensheetmusicdisplay);
+      };
+      document.body.appendChild( s ); // browser will try to load the new script tag
+    })
+  }
+  loadOSMD().then((OSMD)=>{
+    window.openSheetMusicDisplay = new OSMD.OpenSheetMusicDisplay(div_id, {
+      drawingParameters: "compacttight"
+    });
+    openSheetMusicDisplay
+      .load({{data}})
+      .then(
+        function() {
+          openSheetMusicDisplay.render();
+        }
+      );
+  })
+  """.replace('{{DIV_ID}}', DIV_ID).replace('{{data}}', json.dumps(xml))
+  display(Javascript(script))
+  return
 
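
These two helpers are carried over from the notebook version to render the transcription as sheet music via OpenSheetMusicDisplay; they need a frontend that executes the injected JavaScript (Colab/Jupyter), and the Gradio app below never calls them. A usage sketch (hypothetical call; `sc` is the music21.stream.Score assembled in `greet` further down):

showScore(sc)  # writes MusicXML from the score and renders it with OSMD in the notebook output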
 def output2hz(pitch_output):
   # Constants taken from https://tfhub.dev/google/spice/2
...
   BINS_PER_OCTAVE = 12.0;
   cqt_bin = pitch_output * PT_SLOPE + PT_OFFSET;
   return FMIN * 2.0 ** (1.0 * cqt_bin / BINS_PER_OCTAVE)
 
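
As a sanity check on this 0-to-1 → Hz mapping, a small worked example (a sketch: `PT_OFFSET`, `PT_SLOPE` and `FMIN` are elided from this view and assumed here from the published SPICE tutorial at https://tfhub.dev/google/spice/2):

# Assumed constants (see https://tfhub.dev/google/spice/2):
PT_OFFSET, PT_SLOPE = 25.58, 63.07
FMIN, BINS_PER_OCTAVE = 10.0, 12.0

pitch_output = 0.5                                # a mid-range model output
cqt_bin = pitch_output * PT_SLOPE + PT_OFFSET     # 57.115
freq = FMIN * 2.0 ** (cqt_bin / BINS_PER_OCTAVE)  # ~270.9 Hz, between C4 and C#4
print(freq)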
 def quantize_predictions(group, ideal_offset):
   # Group values are either 0, or a pitch in Hz.
...
   return quantization_error, notes_and_rests
 
+def convert_audio_for_model(user_file, output_file='converted_audio_file.wav'):
+  audio = AudioSegment.from_file(user_file)
+  audio = audio.set_frame_rate(EXPECTED_SAMPLE_RATE).set_channels(1)
+  audio.export(output_file, format="wav")
+  return output_file
 
+def hz2offset(freq):
+  # This measures the quantization error for a single note.
+  if freq == 0:  # Rests always have zero error.
+    return None
+  # Quantized note.
+  h = round(12 * math.log2(freq / C0))
+  return 12 * math.log2(freq / C0) - h
 
+def greet(uploaded_file_name):
+
+  converted_audio_file = convert_audio_for_model(uploaded_file_name)
+  sample_rate, audio_samples = wavfile.read(converted_audio_file, 'rb')
+  audio_samples = audio_samples / float(MAX_ABS_INT16)
+
+  model = hub.load("https://tfhub.dev/google/spice/2")
+  model_output = model.signatures["serving_default"](tf.constant(audio_samples, tf.float32))
+
+  pitch_outputs = model_output["pitch"]
+  uncertainty_outputs = model_output["uncertainty"]
+
+  # 'Uncertainty' basically means the inverse of confidence.
+  confidence_outputs = 1.0 - uncertainty_outputs
+
+  confidence_outputs = list(confidence_outputs)
+  pitch_outputs = [float(x) for x in pitch_outputs]
+
+  indices = range(len(pitch_outputs))
+  confident_pitch_outputs = [(i, p)
+      for i, p, c in zip(indices, pitch_outputs, confidence_outputs) if c >= 0.9]
+  confident_pitch_outputs_x, confident_pitch_outputs_y = zip(*confident_pitch_outputs)
+
+  pitch_outputs_and_rests = [
+      output2hz(p) if c >= 0.9 else 0
+      for i, p, c in zip(indices, pitch_outputs, confidence_outputs)
+  ]
+
+  offsets = [hz2offset(p) for p in pitch_outputs_and_rests if p != 0]
+
+  ideal_offset = statistics.mean(offsets)
+
+  best_error = float("inf")
+  best_notes_and_rests = None
+  best_predictions_per_note = None
+
+  for predictions_per_note in range(20, 65, 1):
+    for prediction_start_offset in range(predictions_per_note):
+
+      error, notes_and_rests = get_quantization_and_error(
+          pitch_outputs_and_rests, predictions_per_note,
+          prediction_start_offset, ideal_offset)
+
+      if error < best_error:
+        best_error = error
+        best_notes_and_rests = notes_and_rests
+        best_predictions_per_note = predictions_per_note
+
+  # At this point, best_notes_and_rests contains the best quantization.
+  # Since we don't need to have rests at the beginning, let's remove these:
+  while best_notes_and_rests[0] == 'Rest':
+    best_notes_and_rests = best_notes_and_rests[1:]
+  # Also remove silence at the end.
+  while best_notes_and_rests[-1] == 'Rest':
+    best_notes_and_rests = best_notes_and_rests[:-1]
+
+  sc = music21.stream.Score()
+  # Adjust the speed to match the actual singing.
+  bpm = 60 * 60 / best_predictions_per_note
+  print('bpm: ', bpm)
+  a = music21.tempo.MetronomeMark(number=bpm)
+  sc.insert(0, a)
+
+  for snote in best_notes_and_rests:
+    d = 'half'
+    if snote == 'Rest':
+      sc.append(music21.note.Rest(type=d))
+    else:
+      sc.append(music21.note.Note(snote, type=d))
+
+  converted_audio_file_as_midi = converted_audio_file[:-4] + '.mid'
+  fp = sc.write('midi', fp=converted_audio_file_as_midi)
+
+  wav_from_created_midi = converted_audio_file_as_midi.replace(' ', '_') + "_midioutput.wav"
+  !timidity $converted_audio_file_as_midi -Ow -o $wav_from_created_midi
+
+  #return Audio(wav_from_created_midi)
+  Audio(wav_from_created_midi)
+
+  # ------- PLOT 1 -------
+  fig1 = plt.figure()
+  plt.plot(audio_samples)
+
+  # ------- PLOT 2 -------
+  fig2, ax = plt.subplots()
+  fig2.set_size_inches(90, 50)
+  plt.plot(pitch_outputs, label='pitch')
+  plt.plot(confidence_outputs, label='confidence')
+  plt.legend(loc="lower right")
+
+  # ------- PLOT 3 -------
+  x = audio_samples / MAX_ABS_INT16
+  sample_rate = EXPECTED_SAMPLE_RATE
+  show_black_and_white = False
+
+  x_stft = np.abs(librosa.stft(x, n_fft=2048))
+  fig3, ax1 = plt.subplots()
+  fig3.set_size_inches(20, 10)
+  x_stft_db = librosa.amplitude_to_db(x_stft, ref=np.max)
+  if show_black_and_white:
+    librosadisplay.specshow(data=x_stft_db, y_axis='log',
+                            sr=sample_rate, cmap='gray_r')
+  else:
+    librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)
+
+  # ------- PLOT 4 -------
+  fig4, ax2 = plt.subplots()
+  fig4.set_size_inches(20, 10)
+  ax2.set_ylim([0, 1])
+  plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y)
+  plt.scatter(confident_pitch_outputs_x, confident_pitch_outputs_y, c="r")
+
+  # ------- PLOT 5 -------
+  x = audio_samples / MAX_ABS_INT16
+  sample_rate = EXPECTED_SAMPLE_RATE
+  show_black_and_white = True
+
+  x_stft = np.abs(librosa.stft(x, n_fft=2048))
+  fig5, ax3 = plt.subplots()
+  fig5.set_size_inches(20, 10)
+  x_stft_db = librosa.amplitude_to_db(x_stft, ref=np.max)
+  if show_black_and_white:
+    librosadisplay.specshow(data=x_stft_db, y_axis='log',
+                            sr=sample_rate, cmap='gray_r')
+  else:
+    librosadisplay.specshow(data=x_stft_db, y_axis='log', sr=sample_rate)
+
+  confident_pitch_values_hz = [output2hz(p) for p in confident_pitch_outputs_y]
+  plt.scatter(confident_pitch_outputs_x, confident_pitch_values_hz, c="r")
+
+  return fig1, fig2, fig3, fig4, fig5, wav_from_created_midi  #uploaded_file_name
 
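
One caveat worth flagging: the `!timidity ...` line inside `greet` is IPython cell magic, so this app only runs where such magics are available (e.g. a Colab/Jupyter kernel). In a plain Python process, an equivalent would be a subprocess call (a sketch, not part of the commit):

import subprocess

# Equivalent of `!timidity $converted_audio_file_as_midi -Ow -o $wav_from_created_midi`:
subprocess.run(
    ["timidity", converted_audio_file_as_midi, "-Ow", "-o", wav_from_created_midi],
    check=True)  # renders the MIDI file to wav with the timidity CLI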
+#audio = gr.inputs.Audio(source="upload",type='filepath')
 
+audio = gr.inputs.Audio(source="upload", type='filepath')
 
+out = gr.outputs.Audio(type="auto", label='Salida')
+fig1 = gr.outputs.Plot(type="auto")
+fig2 = gr.outputs.Plot(type="auto")
+fig3 = gr.outputs.Plot(type="auto")
+fig4 = gr.outputs.Plot(type="auto")
+fig5 = gr.outputs.Plot(type="auto")
 
+iface = gr.Interface(fn=greet, inputs=audio, outputs=[fig1, fig2, fig3, fig4, fig5, out])
+iface.launch(debug=True)
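
Note that `gr.inputs` and `gr.outputs` are the legacy Gradio 2.x namespaces, and arguments like `source=` and `type="auto"` were dropped in later releases. On a current Gradio install the same wiring would be declared with top-level components, roughly as follows (a sketch assuming Gradio 3.x or newer, not part of this commit):

import gradio as gr

iface = gr.Interface(
    fn=greet,
    inputs=gr.Audio(type="filepath"),   # upload widget; passes a file path to greet
    outputs=[gr.Plot(), gr.Plot(), gr.Plot(), gr.Plot(), gr.Plot(),
             gr.Audio(label="Salida")],
)
iface.launch(debug=True)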