File size: 486 Bytes
1e50356
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
import gradio as gr
from model import CnnVoiceClassifier

# Gradio demo: classify the speaker's gender from an uploaded audio clip.
model = CnnVoiceClassifier()

# UI widgets: a file-upload audio input and a label output for the prediction.
voice_input = gr.Audio(type='filepath', label='Upload your audio file here')
result_output = gr.Label(label='Gender classification result')

# Clickable example clips: female_1..4.wav followed by male_1..4.wav.
example_clips = [
    f'{gender}_{idx}.wav' for gender in ('female', 'male') for idx in range(1, 5)
]

demo = gr.Interface(
    fn=model.inference,
    inputs=voice_input,
    outputs=result_output,
    examples=example_clips,
)
demo.launch()