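"""Gradio demo: classify heart-sound recordings with the
cogniveon/eeem069_heart_murmur_classification model from the Hugging Face
Hub. Incoming audio is resampled to 16 kHz before inference."""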
import gradio as gr
import torch
import torchaudio
from transformers import pipeline

# Load the pretrained audio-classification pipeline for heart-murmur detection.
pipe = pipeline("audio-classification", model="cogniveon/eeem069_heart_murmur_classification")
def predict(audio):
    # Gradio's "audio" input yields a (sampling_rate, numpy_array) tuple.
    sampling_rate, data = audio
    waveform = torch.tensor(data).float()
    # Mix multi-channel input down to mono; gradio delivers stereo
    # recordings with shape (samples, channels).
    if waveform.ndim > 1:
        waveform = waveform.mean(dim=1)
    # Resample the audio to 16 kHz (if necessary).
    if sampling_rate != 16000:
        resampler = torchaudio.transforms.Resample(sampling_rate, 16000)
        waveform = resampler(waveform)
    # Run the classifier and map each label to its score, highest first.
    results = pipe(waveform.numpy())
    sorted_results = sorted(results, key=lambda x: x['score'], reverse=True)
    label_scores = {item['label']: item['score'] for item in sorted_results}
    return label_scores
gr.Interface(
    fn=predict,
    inputs="audio",
    outputs="label"
).launch()