# hackaton2 / app.py
# (Hugging Face Space by akazmi — commit 417eb56, "Update app.py", 1.69 kB)
import streamlit as st
from transformers import pipeline
import soundfile as sf
# Initialize translation model (English <-> Urdu)
# NOTE(review): pipelines are built at import time, so model downloads block app startup.
translation_model = pipeline("translation_en_to_ur", model="Helsinki-NLP/opus-mt-en-ur")
reverse_translation_model = pipeline("translation_ur_to_en", model="Helsinki-NLP/opus-mt-ur-en")
# Initialize text-to-speech model from Hugging Face
# NOTE(review): confirm "facebook/fastspeech2-en-ljspeech" is usable with the
# "text-to-speech" pipeline task — cannot verify from this file alone.
tts_model = pipeline("text-to-speech", model="facebook/fastspeech2-en-ljspeech")
# Function to translate text and provide spoken feedback
def translate_and_speak(text, direction):
    """Translate *text* and synthesize the translation as speech.

    Args:
        text: Input sentence to translate.
        direction: "English to Urdu" selects en->ur; any other value
            selects ur->en (mirrors the radio-button options in the UI).

    Returns:
        Tuple of (translated_text, audio_path) where audio_path is the
        WAV file written to the working directory.
    """
    if direction == "English to Urdu":
        translated_text = translation_model(text)[0]['translation_text']
    else:
        translated_text = reverse_translation_model(text)[0]['translation_text']
    # Use TTS to synthesize speech from translated text.
    # The text-to-speech pipeline returns {"audio": ndarray, "sampling_rate": int}
    # — the original indexed a non-existent "array" key and hard-coded 22050 Hz.
    audio = tts_model(translated_text)
    audio_path = "output.wav"
    sf.write(audio_path, audio["audio"], audio["sampling_rate"])  # Save audio to file
    return translated_text, audio_path
# Streamlit app UI
st.title("AI-Powered Language Tutor")
st.write("An interactive tutor to help you practice English-Urdu translations with speech feedback!")

# User input for translation
text_input = st.text_area("Enter Text", "Hello, how are you?")
direction = st.radio("Choose Translation Direction", ["English to Urdu", "Urdu to English"])

# Button to process the text and play audio.
# The statements below must be inside the conditional — the scraped source had
# lost this indentation, which made the `if` block empty/invalid.
if st.button("Translate and Speak"):
    translated_text, audio_path = translate_and_speak(text_input, direction)
    # Display translated text and audio
    st.subheader("Translated Text:")
    st.write(translated_text)
    st.subheader("Generated Speech:")
    st.audio(audio_path, format="audio/wav")