Spaces:
Sleeping
Sleeping
# audio_classification_app.py
import gradio as gr
from transformers import pipeline

# Pretrained keyword-spotting model (labels: yes, no, up, down, ...).
MODEL = "superb/wav2vec2-base-superb-ks"

# Build the pipeline once at import time so every request reuses the
# loaded weights instead of re-downloading/re-initializing the model.
classifier = pipeline("audio-classification", model=MODEL)
def classify_audio(audio_file):
    """Classify an audio clip with the keyword-spotting pipeline.

    Args:
        audio_file: Either a filesystem path (str) when the Gradio input
            uses type="filepath", or a (sample_rate, numpy_array) tuple
            when it uses type="numpy".

    Returns:
        dict: mapping of label -> confidence score, which is the format
        gr.Label expects (the raw pipeline output is a list of dicts and
        does not render as confidences).
    """
    if isinstance(audio_file, str):  # filepath input
        results = classifier(audio_file)
    else:  # (sample_rate, data) tuple from type="numpy"
        sr, data = audio_file
        # Gradio delivers int16 PCM; the pipeline expects a float
        # waveform, so normalize to [-1.0, 1.0] float32 first.
        # NOTE(review): assumes mono audio — confirm if stereo clips
        # are possible from the configured input component.
        if data.dtype != "float32":
            data = data.astype("float32") / 32768.0
        results = classifier({"array": data, "sampling_rate": sr})
    # Pipeline output is [{"label": ..., "score": ...}, ...]; convert
    # to the {label: confidence} dict that gr.Label renders.
    return {r["label"]: r["score"] for r in results}
# Wire the classifier into a simple Gradio UI: the Audio input is
# configured with type="filepath", so classify_audio receives a path
# string (the tuple branch covers a future switch to type="numpy").
demo = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs=gr.Label(),
    title="🎵 Audio Classification",
    description="Upload or record audio. Model: wav2vec2-base-superb-ks",
)

# Launch only when run as a script, not when imported (e.g. by tests
# or by a Spaces runtime that imports the module).
if __name__ == "__main__":
    demo.launch()