|
|
import gradio as gr |
|
|
import json |
|
|
from transformers import pipeline |
|
|
import torch |
|
|
from gtts import gTTS |
|
|
import tempfile |
|
|
import os |
|
|
|
|
|
|
|
|
# Load the UI language list: lang_code.json maps human-readable language
# names to FLORES-200 codes, either as a ready-made dict or as a list of
# {"Language": ..., "FLORES-200 code": ...} records.
with open("lang_code.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# Normalise both supported JSON shapes into a single name -> code dict.
LANG_CODES = (
    {entry["Language"]: entry["FLORES-200 code"] for entry in data}
    if isinstance(data, list)
    else data
)
|
|
|
|
|
|
|
|
# One shared NLLB-200 translation pipeline for the whole app.
# Runs on GPU 0 when CUDA is available, otherwise on CPU (device -1).
_device = 0 if torch.cuda.is_available() else -1
translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    device=_device,
)
|
|
|
|
|
|
|
|
def translate_text(text, src_lang, tgt_lang):
    """Translate *text* between two human-readable language names.

    Both names are looked up in ``LANG_CODES`` to obtain their FLORES-200
    codes, which the NLLB ``translator`` pipeline expects. Raises ``KeyError``
    if either name is missing from the mapping.
    """
    src_code, tgt_code = (LANG_CODES[name] for name in (src_lang, tgt_lang))
    outputs = translator(text, src_lang=src_code, tgt_lang=tgt_code)
    return outputs[0]["translation_text"]
|
|
|
|
|
|
|
|
def translate_speech(audio, src_lang, tgt_lang): |
|
|
if audio is None: |
|
|
return None, "Please provide an audio file." |
|
|
|
|
|
|
|
|
asr = pipeline("automatic-speech-recognition", model="openai/whisper-base") |
|
|
transcription = asr(audio)["text"] |
|
|
|
|
|
|
|
|
translated_text = translate_text(transcription, src_lang, tgt_lang) |
|
|
|
|
|
|
|
|
tts = gTTS(text=translated_text, lang='en') |
|
|
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") |
|
|
tts.save(temp_file.name) |
|
|
|
|
|
return temp_file.name, translated_text |
|
|
|
|
|
|
|
|
# Two-tab Gradio UI: plain text translation, and speech-to-speech translation.
with gr.Blocks() as demo:
    gr.Markdown("## π Multilingual Text & Speech Translator")

    # Both tabs offer the same language choices.
    language_names = list(LANG_CODES.keys())

    with gr.Tab("Text Translation"):
        text_src = gr.Dropdown(choices=language_names, value="English (Latin script)", label="Source Language")
        text_tgt = gr.Dropdown(choices=language_names, value="French", label="Target Language")
        text_in = gr.Textbox(label="Enter text to translate")
        text_out = gr.Textbox(label="Translated text")
        gr.Button("Translate").click(
            translate_text,
            inputs=[text_in, text_src, text_tgt],
            outputs=text_out,
        )

    with gr.Tab("Speech Translation"):
        speech_src = gr.Dropdown(choices=language_names, value="English (Latin script)", label="Source Language")
        speech_tgt = gr.Dropdown(choices=language_names, value="French", label="Target Language")
        speech_in = gr.Audio(sources=["microphone", "upload"], type="filepath")
        speech_out = gr.Audio(label="Translated Speech")
        speech_text_out = gr.Textbox(label="Translated Text")
        gr.Button("Translate Speech").click(
            translate_speech,
            inputs=[speech_in, speech_src, speech_tgt],
            outputs=[speech_out, speech_text_out],
        )
|
|
|
|
|
# Start the Gradio server only when this file is run as a script, not when
# it is imported as a module.
if __name__ == "__main__":

    demo.launch()
|
|
|