# app.py — authored by azunre (commit 980366a, verified)
import gradio as gr
from gradio_client import Client, handle_file
client = Client("Ghana-NLP/Northern-Ghana-ASR")
# Top-level Blocks container that the tabbed interfaces are mounted into below.
demo = gr.Blocks()

# Page heading (rendered as Markdown).
title = "# Khaya AI: Speech Recognition for Languages of Northern Ghana"

# Usage instructions shown under the heading. HTML tags are allowed inside
# gr.Markdown, hence the <b>/<br> markup.
description = """
<b>How to use:</b> <br>
SPECIFY INPUT LANGUAGE & Speak through mic (or upload a file of same).<br>
MAXIMUM 30 seconds please.<br>
This model does not produce capitalization or punctuation.<br>
"""
def transcribe_wrapper_fn(audio, LANG):
    """Transcribe an audio clip via the remote Northern-Ghana-ASR Space.

    Args:
        audio: Filepath of the recorded/uploaded clip (gr.Audio with
            type="filepath" passes a path string, or None if the user
            submitted without providing audio).
        LANG: Language name chosen in the dropdown; forwarded verbatim
            to the remote model's /predict endpoint.

    Returns:
        The transcription string returned by the remote Space.

    Raises:
        gr.Error: If no audio was supplied.
    """
    # gr.Audio yields None when the user hits Submit without recording or
    # uploading; handle_file(None) would otherwise fail with an opaque error.
    if audio is None:
        raise gr.Error("Please record or upload an audio clip first.")
    result = client.predict(
        audio=handle_file(audio),
        LANG=LANG,
        api_name="/predict",
    )
    return result
# Microphone-input variant: records from the mic and hands the temp file path
# plus the selected language to the remote ASR wrapper.
mic_transcribe = gr.Interface(
    fn=transcribe_wrapper_fn,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Dropdown(
            [
                "Gonja", "Mampruli", "Gurene", "Dagbani", "Dagaare",
                "Konkomba (Likpakpaanl)", "Wali", "Konkomba (Likoonli)",
                "Kasem", "African English",
            ],
            value="Gonja",
            multiselect=False,
            label="Language",
            info="Select Language",  # fixed typo: was "Select Lamguage"
        ),
    ],
    outputs=gr.Textbox(),
)
# File-upload variant: identical to the microphone interface except that the
# audio component accepts an uploaded file instead of a live recording.
file_transcribe = gr.Interface(
    fn=transcribe_wrapper_fn,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath"),
        gr.Dropdown(
            [
                "Gonja", "Mampruli", "Gurene", "Dagbani", "Dagaare",
                "Konkomba (Likpakpaanl)", "Wali", "Konkomba (Likoonli)",
                "Kasem", "African English",
            ],
            value="Gonja",
            multiselect=False,
            label="Language",
            info="Select Language",  # fixed typo: was "Select Lamguage"
        ),
    ],
    outputs=gr.Textbox(),
)
with demo:
    # Page header and usage instructions.
    gr.Markdown(title)
    gr.Markdown(description)
    # One tab per input modality; both tabs drive the same remote ASR backend.
    gr.TabbedInterface(
        interface_list=[mic_transcribe, file_transcribe],
        tab_names=["Transcribe Microphone", "Transcribe Audio File"],
    )
if __name__ == "__main__":
demo.queue(max_size=20).launch()