| import streamlit as st |
| from transformers import pipeline |
| import soundfile as sf |
|
|
| |
# ---------------------------------------------------------------------------
# Model loading.
#
# Streamlit re-executes this script from the top on every widget interaction,
# so building the Hugging Face pipelines inline would re-download/re-load the
# models on every rerun. Wrapping the loaders in @st.cache_resource makes each
# model load exactly once per server process; the module-level names below are
# unchanged so the rest of the script keeps working.
# ---------------------------------------------------------------------------
@st.cache_resource
def _load_translation_pipelines():
    """Return the (en->ur, ur->en) Helsinki-NLP translation pipelines."""
    en_ur = pipeline("translation_en_to_ur", model="Helsinki-NLP/opus-mt-en-ur")
    ur_en = pipeline("translation_ur_to_en", model="Helsinki-NLP/opus-mt-ur-en")
    return en_ur, ur_en


@st.cache_resource
def _load_tts_pipeline():
    """Return the text-to-speech pipeline.

    NOTE(review): "facebook/fastspeech2-en-ljspeech" is a fairseq checkpoint
    and is not loadable through the transformers "text-to-speech" pipeline —
    this will likely fail at startup. A transformers-native model (e.g.
    "microsoft/speecht5_tts" or "suno/bark-small") is probably intended;
    confirm before shipping. Model string left unchanged here.
    """
    return pipeline("text-to-speech", model="facebook/fastspeech2-en-ljspeech")


translation_model, reverse_translation_model = _load_translation_pipelines()
tts_model = _load_tts_pipeline()
|
|
| |
def translate_and_speak(text, direction):
    """Translate *text* and synthesize the translation as speech.

    Args:
        text: The sentence to translate.
        direction: "English to Urdu" selects en->ur; any other value
            (in practice "Urdu to English") selects ur->en, matching the
            original if/else behavior.

    Returns:
        A tuple ``(translated_text, audio_path)`` where ``audio_path`` is
        the WAV file written to the working directory.
    """
    if direction == "English to Urdu":
        translated_text = translation_model(text)[0]['translation_text']
    else:
        translated_text = reverse_translation_model(text)[0]['translation_text']

    # NOTE(review): the TTS model is English-only (trained on LJSpeech), so
    # feeding it Urdu-script text from the en->ur direction will produce poor
    # or empty audio — confirm whether a multilingual TTS model is needed.
    audio = tts_model(translated_text)

    # Bug fix: transformers text-to-speech pipelines return a dict with keys
    # "audio" (the waveform ndarray) and "sampling_rate" — there is no "array"
    # key, and the hard-coded 22050 would resample/pitch-shift any model whose
    # native rate differs. Use the pipeline's reported rate instead.
    audio_path = "output.wav"
    sf.write(audio_path, audio["audio"], audio["sampling_rate"])
    return translated_text, audio_path
|
|
| |
# ---------------------------------------------------------------------------
# Page layout: header, input widgets, then on-demand translation + playback.
# Streamlit renders widgets in statement order, so the sequence below is the
# page's top-to-bottom layout.
# ---------------------------------------------------------------------------
st.title("AI-Powered Language Tutor")
st.write("An interactive tutor to help you practice English-Urdu translations with speech feedback!")

# Input widgets: free-form text plus a direction selector.
text_input = st.text_area("Enter Text", "Hello, how are you?")
direction = st.radio("Choose Translation Direction", ["English to Urdu", "Urdu to English"])

# st.button returns True only on the rerun triggered by the click, so the
# results section appears just after the user asks for a translation.
clicked = st.button("Translate and Speak")
if clicked:
    translated_text, audio_path = translate_and_speak(text_input, direction)

    st.subheader("Translated Text:")
    st.write(translated_text)

    st.subheader("Generated Speech:")
    st.audio(audio_path, format="audio/wav")
|
|