import gradio as gr
from gradio_client import Client, handle_file
client = Client("Ghana-NLP/Northern-Ghana-ASR")
# Top-level Blocks container; the tabbed UI is assembled into it further below.
demo = gr.Blocks()
# Markdown page heading rendered at the top of the app.
title = "# Khaya AI: Speech Recognition for Languages of Northern Ghana"
# HTML-flavoured usage notes rendered under the title (passed to gr.Markdown).
description = """
<b>How to use:</b> <br>
SPECIFY INPUT LANGUAGE & Speak through mic (or upload a file of same).<br>
MAXIMUM 30 seconds please.<br>
This model does not execute capitalization and punctuation.<br>
"""
def transcribe_wrapper_fn(audio, LANG):
    """Forward a transcription request to the hosted Ghana-NLP ASR Space.

    Parameters
    ----------
    audio : str | None
        Filepath of the recorded/uploaded clip (``gr.Audio`` with
        ``type="filepath"``). ``None`` when the user submits without
        providing any audio.
    LANG : str
        Language chosen in the dropdown; forwarded verbatim to the API.

    Returns
    -------
    str
        Transcription text returned by the remote ``/predict`` endpoint.

    Raises
    ------
    gr.Error
        If no audio was provided. Previously this fell through to
        ``handle_file(None)`` and surfaced an opaque traceback to the user.
    """
    if audio is None:
        # gr.Error is rendered as a friendly message box in the UI.
        raise gr.Error("Please record or upload an audio clip first.")
    result = client.predict(
        audio=handle_file(audio),
        LANG=LANG,
        api_name="/predict",
    )
    return result
# Microphone tab: record audio, pick a language, get the raw transcript.
mic_transcribe = gr.Interface(
    fn=transcribe_wrapper_fn,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Dropdown(
            ["Gonja", "Mampruli", "Gurene", "Dagbani", "Dagaare",
             "Konkomba (Likpakpaanl)", "Wali", "Konkomba (Likoonli)",
             "Kasem", "African English"],
            value="Gonja",
            multiselect=False,
            label="Language",
            # Fixed user-facing typo: was "Select Lamguage".
            info="Select Language",
        ),
    ],
    outputs=gr.Textbox(),
)
# File-upload tab: same pipeline as the mic tab but sourced from an upload.
file_transcribe = gr.Interface(
    fn=transcribe_wrapper_fn,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath"),
        gr.Dropdown(
            ["Gonja", "Mampruli", "Gurene", "Dagbani", "Dagaare",
             "Konkomba (Likpakpaanl)", "Wali", "Konkomba (Likoonli)",
             "Kasem", "African English"],
            value="Gonja",
            multiselect=False,
            label="Language",
            # Fixed user-facing typo: was "Select Lamguage".
            info="Select Language",
        ),
    ],
    outputs=gr.Textbox(),
)
# Assemble the page inside the Blocks container: heading, usage notes, then
# the two transcription interfaces rendered as tabs.
with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    # Each Interface becomes one tab; the second list supplies the tab labels.
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )
if __name__ == "__main__":
    # Bound the request queue so at most 20 jobs wait at once, then serve.
    demo.queue(max_size=20).launch()
|