import gradio as gr
import json
from transformers import pipeline
import torch
from gtts import gTTS
import tempfile
import os

# Load language codes from JSON (either a list of records or a plain mapping).
with open("lang_code.json", "r", encoding="utf-8") as f:
    data = json.load(f)

# Normalize to {human-readable name: FLORES-200 code}.
if isinstance(data, list):
    LANG_CODES = {item["Language"]: item["FLORES-200 code"] for item in data}
else:
    LANG_CODES = data

# Translation pipeline (NLLB-200); runs on GPU when one is available.
translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    device=0 if torch.cuda.is_available() else -1,
)

# Lazily-created, shared Whisper ASR pipeline. The original code rebuilt the
# pipeline (re-downloading/re-loading the model) on EVERY speech request;
# caching it here makes that a one-time cost.
_asr_pipeline = None


def _get_asr():
    """Return the shared Whisper ASR pipeline, creating it on first use."""
    global _asr_pipeline
    if _asr_pipeline is None:
        _asr_pipeline = pipeline(
            "automatic-speech-recognition", model="openai/whisper-base"
        )
    return _asr_pipeline


def translate_text(text, src_lang, tgt_lang):
    """Translate *text* from *src_lang* to *tgt_lang*.

    Args:
        text: Source-language text to translate.
        src_lang: Human-readable source language name (a key of LANG_CODES).
        tgt_lang: Human-readable target language name (a key of LANG_CODES).

    Returns:
        The translated string.

    Raises:
        KeyError: If either language name is not present in LANG_CODES.
    """
    src_code = LANG_CODES[src_lang]
    tgt_code = LANG_CODES[tgt_lang]
    result = translator(text, src_lang=src_code, tgt_lang=tgt_code)
    return result[0]['translation_text']


def translate_speech(audio, src_lang, tgt_lang):
    """Transcribe *audio*, translate the transcript, and synthesize speech.

    Args:
        audio: Filesystem path to the input audio, or None if nothing was
            recorded/uploaded.
        src_lang: Human-readable source language name (a key of LANG_CODES).
        tgt_lang: Human-readable target language name (a key of LANG_CODES).

    Returns:
        Tuple of (path to generated MP3 or None, translated text or an
        error message).
    """
    if audio is None:
        return None, "Please provide an audio file."

    # Speech -> text using the cached Whisper pipeline.
    transcription = _get_asr()(audio)["text"]

    # Text -> text translation.
    translated_text = translate_text(transcription, src_lang, tgt_lang)

    # Text -> speech. NOTE(review): gTTS expects an ISO-639-1 code and 'en'
    # is hard-coded, so synthesized speech uses an English voice regardless
    # of the target language — TODO: map the FLORES-200 target code to
    # ISO-639-1 and pass it here.
    tts = gTTS(text=translated_text, lang='en')
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    # Close the handle before gTTS writes to the path: on Windows an open
    # NamedTemporaryFile cannot be reopened by name.
    temp_file.close()
    tts.save(temp_file.name)

    return temp_file.name, translated_text


# Gradio UI: one tab for text translation, one for speech translation.
with gr.Blocks() as demo:
    gr.Markdown("## 🌐 Multilingual Text & Speech Translator")

    with gr.Tab("Text Translation"):
        src_lang = gr.Dropdown(
            choices=list(LANG_CODES.keys()),
            value="English (Latin script)",
            label="Source Language",
        )
        tgt_lang = gr.Dropdown(
            choices=list(LANG_CODES.keys()),
            value="French",
            label="Target Language",
        )
        input_text = gr.Textbox(label="Enter text to translate")
        output_text = gr.Textbox(label="Translated text")
        translate_btn = gr.Button("Translate")
        translate_btn.click(
            translate_text,
            inputs=[input_text, src_lang, tgt_lang],
            outputs=output_text,
        )

    with gr.Tab("Speech Translation"):
        src_lang_s = gr.Dropdown(
            choices=list(LANG_CODES.keys()),
            value="English (Latin script)",
            label="Source Language",
        )
        tgt_lang_s = gr.Dropdown(
            choices=list(LANG_CODES.keys()),
            value="French",
            label="Target Language",
        )
        audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")
        audio_output = gr.Audio(label="Translated Speech")
        translated_text_output = gr.Textbox(label="Translated Text")
        translate_speech_btn = gr.Button("Translate Speech")
        translate_speech_btn.click(
            translate_speech,
            inputs=[audio_input, src_lang_s, tgt_lang_s],
            outputs=[audio_output, translated_text_output],
        )

if __name__ == "__main__":
    demo.launch()