File size: 3,252 Bytes
def2a82
 
 
cef3366
def2a82
 
 
 
 
 
 
 
 
 
 
cef3366
 
 
 
354513a
 
 
 
 
 
 
cef3366
 
354513a
def2a82
 
 
 
 
cef3366
def2a82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
831fa7f
 
 
 
 
 
 
def2a82
 
831fa7f
def2a82
 
831fa7f
def2a82
 
 
 
 
 
 
831fa7f
def2a82
 
 
 
 
d55f821
def2a82
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, Wav2Vec2ForCTC, Wav2Vec2Processor
import torch
import torchaudio
from gtts import gTTS
import os

# Load models once at import time (each call downloads/caches weights from the
# Hugging Face hub on first run).
# Speech recognition: Wav2Vec2 CTC model trained on 960h LibriSpeech (English).
asr_model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h")
asr_processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h")
# Conversational reply generation: DialoGPT-medium causal LM + its tokenizer.
chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
chat_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")

# Function: Speech-to-Text (STT)
def speech_to_text(audio):
    """Transcribe a recorded audio file to text with Wav2Vec2.

    Parameters
    ----------
    audio : str | None
        Filesystem path to the recording (Gradio supplies a filepath),
        or a falsy value when nothing was recorded.

    Returns
    -------
    str
        The decoded transcription, or a fallback message when no audio
        was provided.
    """
    if not audio:
        return "No audio provided."

    # torchaudio.load returns a (channels, num_frames) float tensor and the
    # file's native sample rate.
    waveform, original_sample_rate = torchaudio.load(audio)

    # Wav2Vec2 expects 16 kHz input; resample if the recording differs.
    target_sample_rate = 16000
    if original_sample_rate != target_sample_rate:
        resampler = torchaudio.transforms.Resample(
            orig_freq=original_sample_rate, new_freq=target_sample_rate
        )
        waveform = resampler(waveform)

    # Fix: the processor expects a 1-D waveform, but the original code passed
    # the (channels, frames) tensor through unchanged, which mis-shapes the
    # batch for any stereo recording. Downmix to mono by averaging channels.
    if waveform.dim() > 1:
        waveform = waveform.mean(dim=0)

    # Normalize/pad and wrap as a PyTorch batch for the CTC model.
    inputs = asr_processor(
        waveform, sampling_rate=target_sample_rate, return_tensors="pt", padding=True
    )
    # Inference only — disable autograd to avoid building a gradient graph.
    with torch.no_grad():
        logits = asr_model(inputs.input_values).logits
    # Greedy CTC decode: most likely token per frame, then collapse to text.
    predicted_ids = torch.argmax(logits, dim=-1)
    return asr_processor.decode(predicted_ids[0])


# Function: Generate chatbot response
def chatbot_response(user_input, history):
    """Generate a DialoGPT reply to *user_input* given the conversation so far.

    Parameters
    ----------
    user_input : str
        The user's latest utterance.
    history : str
        Accumulated conversation text prepended as context.

    Returns
    -------
    str
        The model's reply (special tokens stripped), never empty.
    """
    # Encode context + new turn, terminated with EOS as DialoGPT expects.
    inputs = chat_tokenizer.encode(
        history + user_input + chat_tokenizer.eos_token, return_tensors="pt"
    )
    # Inference only — no gradients needed.
    with torch.no_grad():
        response_ids = chat_model.generate(
            inputs, max_length=500, pad_token_id=chat_tokenizer.eos_token_id
        )
    # Keep only the newly generated tokens (everything past the prompt).
    response = chat_tokenizer.decode(
        response_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True
    )
    # Fix: once the prompt alone approaches max_length, generate() can emit
    # zero new tokens; an empty string would crash gTTS downstream. Fall back
    # to a safe utterance instead.
    return response or "I'm not sure what to say. Could you rephrase that?"

# Function: Text-to-Speech (TTS)
def text_to_speech(text, filename="response.mp3"):
    """Render *text* as spoken English via gTTS and write it to an MP3 file.

    Parameters
    ----------
    text : str
        The sentence(s) to synthesize.
    filename : str
        Output path for the MP3 (default "response.mp3", overwritten each call).

    Returns
    -------
    str
        The output filename, ready to hand to a Gradio Audio component.
    """
    gTTS(text=text, lang="en").save(filename)
    return filename

# Main Chat Function
def englishia(audio, history=""):
    """One full voice-chat round trip: audio in -> transcribe -> reply -> audio out.

    Parameters
    ----------
    audio : str | None
        Path to the user's recording, or a falsy value when nothing was captured.
    history : str
        Running transcript of the conversation so far.

    Returns
    -------
    tuple
        (path to the spoken-response MP3, updated history string).
    """
    # Guard clause: nothing recorded -> speak a retry prompt, leave history as-is.
    if not audio:
        response = "I didn't catch that. Please try speaking again."
        return text_to_speech(response), history

    user_text = speech_to_text(audio)                    # 1. speech -> text
    bot_response = chatbot_response(user_text, history)  # 2. text -> reply
    audio_response = text_to_speech(bot_response)        # 3. reply -> speech

    # 4. Append this exchange to the transcript shown in the UI.
    history += f"User: {user_text}\nEnglishia: {bot_response}\n"
    return audio_response, history


# Gradio Interface: mic input and audio playback side by side, with a
# read-only transcript below and a single submit button driving `englishia`.
with gr.Blocks() as englishia_interface:
    gr.Markdown("# Welcome to Englishia: Your English Practice Assistant")

    with gr.Row():
        # Input: the recording is delivered to the callback as a file path
        # (type="filepath"), which is what speech_to_text expects.
        user_audio = gr.Audio(type="filepath", label="Speak to Englishia")
        # Output: plays back the synthesized MP3 returned by the callback.
        chatbot_output = gr.Audio(label="Englishia's Response")
    
    # The transcript is both an input (prior context fed back to the model)
    # and an output (updated after each turn); users cannot edit it directly.
    conversation_history = gr.Textbox(label="Conversation History", lines=10, interactive=False)

    submit_button = gr.Button("Submit")
    submit_button.click(
        englishia,
        inputs=[user_audio, conversation_history],
        outputs=[chatbot_output, conversation_history]
    )

# Launch the App
englishia_interface.launch()