import gradio as gr
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import os
from fastai.vision.all import *
from PIL import Image
def fig2img(fig):
    """Render a Matplotlib figure into an in-memory buffer and return it as a PIL Image."""
    from io import BytesIO

    buffer = BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
# Define function to convert given audio file to spectogram
def audio_to_spectogram(audio_path, save_path=None):
    """Compute a log-frequency spectrogram image for the given audio file.

    Parameters
    ----------
    audio_path : str
        Path to an audio file readable by librosa.
    save_path : str, optional
        If given, the spectrogram figure is saved to this path and
        nothing is returned. If None, the figure is converted to a
        PIL Image and returned instead.

    Returns
    -------
    PIL.Image.Image or None
    """
    # sr=None keeps the file's native sampling rate instead of resampling.
    y, sr = librosa.load(audio_path, sr=None)
    # Compute the spectrogram: magnitude STFT converted to dB relative to peak.
    D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(D, sr=sr, x_axis='time', y_axis='log')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Spectrogram')
    if save_path is not None:
        # Save the figure as an image
        plt.savefig(save_path)
        plt.close()
    else:
        fig = plt.gcf()
        img = fig2img(fig)
        # Close the figure in this branch too: the original leaked one
        # matplotlib figure per call, accumulating memory across requests.
        plt.close(fig)
        return img
def label_fn(x):
    """Derive the class label for a sample file: the name of its parent directory."""
    parent_dir = x.parent
    return parent_dir.name
# Class names zipped with the model's output probabilities in classify_image.
# NOTE(review): assumes this order matches the trained learner's vocab — confirm
# against learn.dls.vocab.
categories = ('claps', 'click', 'cymbals', 'hats_closed', 'hats_open', 'kicks', 'percussion',
              'rides', 'rimshot', 'shakers', 'snaps', 'snares', 'tambourines', 'toms')
# Load the exported fastai learner from the working directory.
learn = load_learner('sample-classifier-model-01.pkl')
def classify_image(audio):
    """Classify an uploaded audio sample into one of the drum-sample categories.

    The audio file is first rendered to a spectrogram image on disk, which
    the fastai learner then classifies; returns a category -> probability dict.
    """
    audio_to_spectogram(audio, save_path="spect.png")
    _, _, probabilities = learn.predict(PILImage.create("spect.png"))
    return {category: float(p) for category, p in zip(categories, probabilities)}
# Build the Gradio UI: audio-file input -> classification-probabilities output.
audio = gr.components.Audio(type='filepath')
# Use gr.components.Label to match the gr.components.Audio usage above:
# the legacy gr.outputs namespace was deprecated and removed in Gradio 3+.
label = gr.components.Label()
iface = gr.Interface(fn=classify_image, inputs=audio, outputs=label)
iface.launch(inline=False)