Spaces:
Runtime error
Runtime error
Upload 3 files
Browse files- app.py +17 -8
- requirements.txt +1 -11
app.py
CHANGED
|
@@ -3,14 +3,23 @@
|
|
| 3 |
# %% auto 0
|
| 4 |
__all__ = ['learn', 'categories', 'aud', 'examples', 'intf', 'log_mel_spec_tfm', 'classify_aud']
|
| 5 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
# %% app.ipynb 1
|
| 7 |
-
def log_mel_spec_tfm(fname
|
| 8 |
-
|
| 9 |
-
y, sr = librosa.load(str(src_path/fname), mono=True)
|
| 10 |
D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
|
| 11 |
img = librosa.display.specshow(D, y_axis='linear', x_axis='time',
|
| 12 |
sr=sr)
|
| 13 |
-
plt.savefig(str(
|
| 14 |
plt.close()
|
| 15 |
return img
|
| 16 |
|
|
@@ -18,18 +27,18 @@ def log_mel_spec_tfm(fname, src_path, dst_path):
|
|
| 18 |
learn = load_learner('model.pkl')
|
| 19 |
learn.remove_cb(ProgressCallback)
|
| 20 |
|
| 21 |
-
# %% app.ipynb
|
| 22 |
categories = ('Brass', 'Flute', 'Guitar', 'Keyboard', 'Mallet', 'Reed', 'String', 'Vocal')
|
| 23 |
|
| 24 |
def classify_aud(aud):
|
| 25 |
-
log_mel_spec_tfm(aud
|
| 26 |
img_fname = str(aud[:-4]) + '.png'
|
| 27 |
pred, idx, probs = learn.predict(img_fname)
|
| 28 |
return dict(zip(categories, map(float, probs)))
|
| 29 |
|
| 30 |
-
# %% app.ipynb
|
| 31 |
aud = gr.Audio(source="upload", type="numpy")
|
| 32 |
-
examples = [
|
| 33 |
|
| 34 |
intf = gr.Interface(fn = classify_aud, inputs = aud, outputs = "label", examples = examples)
|
| 35 |
intf.launch(inline = False)
|
|
|
|
| 3 |
# %% auto 0
|
| 4 |
__all__ = ['learn', 'categories', 'aud', 'examples', 'intf', 'log_mel_spec_tfm', 'classify_aud']
|
| 5 |
|
| 6 |
+
from fastai.vision.all import *
|
| 7 |
+
import librosa.display
|
| 8 |
+
import matplotlib.pyplot as plt
|
| 9 |
+
import numpy as np
|
| 10 |
+
from functools import partial
|
| 11 |
+
import pandas as pd
|
| 12 |
+
import librosa
|
| 13 |
+
from scipy.io import wavfile
|
| 14 |
+
import gradio as gr
|
| 15 |
+
|
| 16 |
# %% app.ipynb 1
|
| 17 |
+
def log_mel_spec_tfm(fname):
    """Render a spectrogram image for the audio file *fname*.

    Loads the audio as mono, computes an STFT magnitude spectrogram in
    decibels, draws it, and saves the plot as ``fname[:-4] + '.png'`` —
    the same naming convention ``classify_aud`` uses to locate the image,
    so the two must stay in sync.

    NOTE(review): despite the name, this is a linear-frequency STFT
    spectrogram, not a log-mel one — confirm it matches how the model's
    training images were generated.

    Parameters
    ----------
    fname : str
        Path to the audio file; assumed to end in a 3-character
        extension such as ``.wav`` (the ``[:-4]`` slice depends on it).

    Returns
    -------
    The artist returned by ``librosa.display.specshow``.
    """
    y, sr = librosa.load(fname, mono=True)
    # Magnitude spectrogram in dB, referenced to the peak value.
    D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
    # Draw on an explicit figure instead of pyplot's implicit "current"
    # figure: in a served Gradio app, interleaved requests could otherwise
    # draw onto (or close) each other's figures.
    fig, ax = plt.subplots()
    img = librosa.display.specshow(D, y_axis='linear', x_axis='time',
                                   sr=sr, ax=ax)
    fig.savefig(str(fname[:-4]) + '.png')
    plt.close(fig)  # release the figure; matplotlib keeps figures alive otherwise
    return img
|
| 25 |
|
|
|
|
| 27 |
# Load the exported fastai Learner; 'model.pkl' must be uploaded alongside
# this script or load_learner raises FileNotFoundError at startup.
learn = load_learner('model.pkl')
# Remove the training progress-bar callback — progress output serves no
# purpose when the learner is only used for inference in a hosted app.
learn.remove_cb(ProgressCallback)
|
| 29 |
|
| 30 |
+
# %% app.ipynb 5
|
| 31 |
categories = ('Brass', 'Flute', 'Guitar', 'Keyboard', 'Mallet', 'Reed', 'String', 'Vocal')

def classify_aud(aud):
    """Classify an audio file into one of the instrument ``categories``.

    Renders the file's spectrogram via ``log_mel_spec_tfm`` (which saves
    it using the same ``[:-4] + '.png'`` naming convention used here),
    runs the image through the fastai learner, and returns a
    ``{category: probability}`` mapping for Gradio's label output.

    NOTE(review): assumes ``aud`` is a file path ending in a 3-character
    extension (e.g. '.wav') — the ``[:-4]`` slice depends on it.
    """
    log_mel_spec_tfm(aud)
    spectrogram_path = str(aud[:-4]) + '.png'
    _, _, probs = learn.predict(spectrogram_path)
    return {label: float(p) for label, p in zip(categories, probs)}
|
| 38 |
|
| 39 |
+
# %% app.ipynb 6
|
| 40 |
# Gradio UI wiring.
#
# BUG FIX: the audio input used type="numpy", which hands classify_aud a
# (sample_rate, ndarray) tuple — but classify_aud treats its argument as a
# file path (slices off the extension, hands it to librosa.load), so every
# prediction crashed at runtime. type="filepath" delivers the uploaded
# file's path, which is what the pipeline expects.
aud = gr.Audio(source="upload", type="filepath")
# Offer every .wav file sitting next to the script as a clickable example.
# (f.suffix == '.wav' is stricter than the old `'.wav' in f.name`, which
# also matched names like 'clip.wav.bak'.)
# NOTE(review): `Path` comes from fastai's star import — confirm.
examples = [f.name for f in Path('.').iterdir() if f.suffix == '.wav']

intf = gr.Interface(fn = classify_aud, inputs = aud, outputs = "label", examples = examples)
intf.launch(inline = False)
|
requirements.txt
CHANGED
|
@@ -1,11 +1 @@
|
|
| 1 |
-
|
| 2 |
-
fastai
|
| 3 |
-
librosa
|
| 4 |
-
matplotlib
|
| 5 |
-
numpy
|
| 6 |
-
functools
|
| 7 |
-
pathlib
|
| 8 |
-
multiprocessing
|
| 9 |
-
os
|
| 10 |
-
scipy
|
| 11 |
-
gradio
|
|
|
|
| 1 |
+
fastai
librosa
matplotlib
numpy
scipy
gradio
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|