File size: 1,419 Bytes
be917bf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# Import the Gradio library for creating web interfaces
import gradio as gr
# Import the pipeline module from transformers for using pre-trained models
from transformers import pipeline
# Import numpy for numerical operations
import numpy as np

# Build the ASR pipeline once at module load; "openai/whisper-base.en" is the
# base-size, English-only Whisper checkpoint.
transcriber = pipeline(
    task="automatic-speech-recognition",
    model="openai/whisper-base.en",
)

# Transcribe streaming microphone audio, accumulating chunks across calls.
def transcribe(stream, new_chunk):
    """Append a new audio chunk to the running stream and transcribe it.

    Args:
        stream: Accumulated float32 audio so far (1-D np.ndarray), or None on
            the first call.
        new_chunk: Tuple of (sample_rate, samples) as delivered by
            gr.Audio(streaming=True); samples is an integer numpy array.

    Returns:
        A (stream, text) tuple: the updated accumulated audio (fed back in as
        Gradio state) and the transcription of the entire stream so far.
    """
    # Unpack the audio tuple into sample rate (sr) and audio data (y)
    sr, y = new_chunk
    # astype returns a copy, so the caller's array is never mutated
    y = y.astype(np.float32)
    # Normalize the chunk to [-1, 1]. Guard against a silent (all-zero)
    # chunk: dividing by a zero peak would fill the stream with NaNs and
    # permanently break every later transcription.
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak > 0:
        y /= peak

    # Grow the accumulated stream; first call starts it from this chunk
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y

    # Transcribe the full accumulated audio so the text stays coherent
    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]


# Wire the transcribe function into a live-updating Gradio interface.
# The "state" input/output pair round-trips the accumulated audio stream
# between calls; the microphone component streams chunks as they arrive.
demo = gr.Interface(
    fn=transcribe,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", "text"],
    live=True,
)

demo.launch()