import gradio as gr
import json
from transformers import pipeline
import torch
from gtts import gTTS
import tempfile
import os

# Load language codes from JSON
with open("lang_code.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# Convert list → dict if needed
if isinstance(data, list):
    LANG_CODES = {item["Language"]: item["FLORES-200 code"] for item in data}
else:
    LANG_CODES = data
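# For reference, lang_code.json is assumed to look like this (illustrative
# entries only; the actual file ships with the app and is not reproduced here):
# [
#   {"Language": "English (Latin script)", "FLORES-200 code": "eng_Latn"},
#   {"Language": "French", "FLORES-200 code": "fra_Latn"}
# ]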

# Load translation pipeline
translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    device=0 if torch.cuda.is_available() else -1
)

# Load the speech-recognition pipeline (Whisper) once at startup,
# rather than reloading it on every speech-translation request
asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base",
    device=0 if torch.cuda.is_available() else -1
)
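
# Quick sanity check (optional, illustrative; the codes assume matching
# entries exist in lang_code.json):
# print(translator("Hello!", src_lang="eng_Latn", tgt_lang="fra_Latn")[0]["translation_text"])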

# Text translation function
def translate_text(text, src_lang, tgt_lang):
    src_code = LANG_CODES[src_lang]
    tgt_code = LANG_CODES[tgt_lang]
    result = translator(text, src_lang=src_code, tgt_lang=tgt_code)
    return result[0]['translation_text']

# Speech translation function
def translate_speech(audio, src_lang, tgt_lang):
    if audio is None:
        return None, "Please provide an audio file."
    
    # Convert speech to text with the Whisper pipeline loaded at startup
    transcription = asr(audio)["text"]

    # Translate the text
    translated_text = translate_text(transcription, src_lang, tgt_lang)

    # Convert translated text to speech (gTTS uses ISO-639-1 codes, so the
    # voice is hardcoded to English here; see the mapping sketch below)
    tts = gTTS(text=translated_text, lang='en')

    # Write to a closed temp file so gTTS can save it (an open
    # NamedTemporaryFile cannot be reopened for writing on Windows)
    fd, temp_path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)
    tts.save(temp_path)

    return temp_path, translated_text
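
# Hedged sketch: gTTS expects ISO-639-1 codes ("fr", "es"), while FLORES-200
# uses codes like "fra_Latn". GTTS_LANG_MAP and gtts_code_for are illustrative
# helpers, not part of the original app; extend the table for the languages
# you actually serve.
GTTS_LANG_MAP = {
    "eng_Latn": "en",
    "fra_Latn": "fr",
    "spa_Latn": "es",
    "deu_Latn": "de",
    "hin_Deva": "hi",
}

def gtts_code_for(flores_code, default="en"):
    """Return a gTTS voice code for a FLORES-200 code, falling back to English."""
    return GTTS_LANG_MAP.get(flores_code, default)

# Hypothetical usage inside translate_speech:
# tts = gTTS(text=translated_text, lang=gtts_code_for(LANG_CODES[tgt_lang]))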

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## ๐ŸŒ Multilingual Text & Speech Translator")

    with gr.Tab("Text Translation"):
        src_lang = gr.Dropdown(choices=list(LANG_CODES.keys()), value="English (Latin script)", label="Source Language")
        tgt_lang = gr.Dropdown(choices=list(LANG_CODES.keys()), value="French", label="Target Language")
        input_text = gr.Textbox(label="Enter text to translate")
        output_text = gr.Textbox(label="Translated text")
        translate_btn = gr.Button("Translate")
        translate_btn.click(translate_text, inputs=[input_text, src_lang, tgt_lang], outputs=output_text)

    with gr.Tab("Speech Translation"):
        src_lang_s = gr.Dropdown(choices=list(LANG_CODES.keys()), value="English (Latin script)", label="Source Language")
        tgt_lang_s = gr.Dropdown(choices=list(LANG_CODES.keys()), value="French", label="Target Language")
        audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")
        audio_output = gr.Audio(label="Translated Speech")
        translated_text_output = gr.Textbox(label="Translated Text")
        translate_speech_btn = gr.Button("Translate Speech")
        translate_speech_btn.click(translate_speech, inputs=[audio_input, src_lang_s, tgt_lang_s], outputs=[audio_output, translated_text_output])

if __name__ == "__main__":
    demo.launch()