Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import streamlit as st
|
|
|
|
| 2 |
from transformers import pipeline
|
| 3 |
import speech_recognition as sr
|
| 4 |
from gtts import gTTS
|
|
@@ -6,93 +7,72 @@ import tempfile
|
|
| 6 |
import os
|
| 7 |
import base64
|
| 8 |
|
| 9 |
-
#
|
| 10 |
@st.cache_resource
|
| 11 |
-
def
|
| 12 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
# Initialize speech recognition
|
| 15 |
recognizer = sr.Recognizer()
|
| 16 |
|
| 17 |
-
def translate_text(input_text, model):
|
| 18 |
-
return model(input_text)[0]["translation_text"]
|
| 19 |
-
|
| 20 |
def speech_to_text(audio_file):
|
| 21 |
with sr.AudioFile(audio_file) as source:
|
| 22 |
audio_data = recognizer.record(source)
|
| 23 |
return recognizer.recognize_google(audio_data)
|
| 24 |
|
|
|
|
|
|
|
|
|
|
| 25 |
def text_to_speech(text, language):
|
| 26 |
tts = gTTS(text=text, lang=language)
|
| 27 |
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
|
| 28 |
tts.save(temp_file.name)
|
| 29 |
return temp_file.name
|
| 30 |
|
| 31 |
-
# Streamlit
|
| 32 |
-
st.title("
|
| 33 |
-
st.write("Translate voice and text between multiple languages in real-time!")
|
| 34 |
|
| 35 |
-
#
|
| 36 |
st.sidebar.header("Settings")
|
| 37 |
-
input_lang = st.sidebar.selectbox("Select Input Language", ["
|
| 38 |
-
output_lang = st.sidebar.selectbox("Select Output Language", ["
|
| 39 |
-
|
| 40 |
-
# Language codes mapping
|
| 41 |
-
lang_codes = {
|
| 42 |
-
"English": "en",
|
| 43 |
-
"French": "fr",
|
| 44 |
-
"Spanish": "es",
|
| 45 |
-
"German": "de",
|
| 46 |
-
"Hindi": "hi"
|
| 47 |
-
}
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
# Model selection
|
| 53 |
-
model_name = f"Helsinki-NLP/opus-mt-{input_code}-{output_code}"
|
| 54 |
-
translation_pipeline = load_translation_pipeline(model_name)
|
| 55 |
|
| 56 |
# Input options
|
| 57 |
-
st.header("Input
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
-
# Provide download link
|
| 74 |
-
b64 = base64.b64encode(audio_bytes).decode()
|
| 75 |
-
href = f'<a href="data:audio/mp3;base64,{b64}" download="translation.mp3">Download Translated Audio</a>'
|
| 76 |
-
st.markdown(href, unsafe_allow_html=True)
|
| 77 |
else:
|
| 78 |
-
|
| 79 |
-
if audio_file is not None:
|
| 80 |
-
if st.button("Translate"):
|
| 81 |
-
try:
|
| 82 |
-
input_text = speech_to_text(audio_file)
|
| 83 |
-
st.write(f"Recognized Text in {input_lang}: {input_text}")
|
| 84 |
-
translated_text = translate_text(input_text, translation_pipeline)
|
| 85 |
-
st.success(f"Translated Text in {output_lang}: {translated_text}")
|
| 86 |
-
|
| 87 |
-
# Option to download translation as audio
|
| 88 |
-
if st.checkbox("Play Translated Audio"):
|
| 89 |
-
audio_file = text_to_speech(translated_text, output_code)
|
| 90 |
-
audio_bytes = open(audio_file, "rb").read()
|
| 91 |
-
st.audio(audio_bytes, format="audio/mp3")
|
| 92 |
-
|
| 93 |
-
# Provide download link
|
| 94 |
-
b64 = base64.b64encode(audio_bytes).decode()
|
| 95 |
-
href = f'<a href="data:audio/mp3;base64,{b64}" download="translation.mp3">Download Translated Audio</a>'
|
| 96 |
-
st.markdown(href, unsafe_allow_html=True)
|
| 97 |
-
except Exception as e:
|
| 98 |
-
st.error(f"Error: {e}")
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
+
import whisper
|
| 3 |
from transformers import pipeline
|
| 4 |
import speech_recognition as sr
|
| 5 |
from gtts import gTTS
|
|
|
|
| 7 |
import os
|
| 8 |
import base64
|
| 9 |
|
| 10 |
+
# Load Whisper model for Speech-to-Text
@st.cache_resource
def load_whisper_model():
    """Load and cache the OpenAI Whisper ASR model.

    Wrapped in st.cache_resource so the model is loaded once per
    Streamlit server process rather than on every script rerun.
    """
    # "base" trades accuracy for download/load time; "large" is more accurate.
    return whisper.load_model("base")
|
| 14 |
+
|
| 15 |
+
# Load translation model from Hugging Face
@st.cache_resource
def load_translation_model(input_lang, output_lang):
    """Build and cache a Hugging Face translation pipeline.

    Selects the Helsinki-NLP OPUS-MT checkpoint for the given language
    pair (ISO 639-1 codes, e.g. "en" -> "fr"). st.cache_resource caches
    one pipeline per (input_lang, output_lang) combination.

    NOTE(review): no opus-mt-{x}-{x} checkpoint exists, so identical
    input/output languages will fail at load time — guard at the call site.
    """
    checkpoint = f"Helsinki-NLP/opus-mt-{input_lang}-{output_lang}"
    return pipeline("translation", model=checkpoint)
|
| 20 |
|
| 21 |
# Initialize speech recognition
# Module-level recognizer instance shared by speech_to_text().
recognizer = sr.Recognizer()
|
| 23 |
|
|
|
|
|
|
|
|
|
|
| 24 |
def speech_to_text(audio_file, language=None):
    """Transcribe a WAV source to text via Google's web speech API.

    Parameters
    ----------
    audio_file : path or file-like object accepted by ``sr.AudioFile``
        (e.g. the Streamlit ``UploadedFile`` produced by the uploader below).
    language : str | None
        Optional BCP-47 tag (e.g. "fr-FR") forwarded to the recognizer.
        ``None`` keeps the API default (en-US), preserving the original
        behavior for existing callers.

    Raises
    ------
    sr.UnknownValueError
        If the speech is unintelligible.
    sr.RequestError
        If the Google API cannot be reached.
    """
    with sr.AudioFile(audio_file) as source:
        audio_data = recognizer.record(source)
    # recognize_google defaults to en-US; an explicit `language` lets the
    # non-English inputs this app offers (fr/es/de/hi) transcribe correctly.
    if language is not None:
        return recognizer.recognize_google(audio_data, language=language)
    return recognizer.recognize_google(audio_data)
|
| 28 |
|
| 29 |
+
def translate_text(input_text, translation_pipeline):
    """Translate ``input_text`` using a Hugging Face translation pipeline.

    The pipeline returns a list of result dicts; the translated string
    is stored under the "translation_text" key of the first result.
    """
    results = translation_pipeline(input_text)
    first_result = results[0]
    return first_result["translation_text"]
|
| 31 |
+
|
| 32 |
def text_to_speech(text, language):
    """Synthesize ``text`` to an MP3 file with gTTS and return its path.

    Parameters
    ----------
    text : str
        Text to speak.
    language : str
        gTTS language code (e.g. "en", "fr").

    Returns
    -------
    str
        Path of a temporary .mp3 file. ``delete=False`` keeps it on disk,
        so the caller is responsible for removing it.
    """
    tts = gTTS(text=text, lang=language)
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    # Close the handle before gTTS writes to the path: the original left it
    # open, leaking the descriptor and breaking the write on platforms
    # (Windows) that lock open files.
    temp_file.close()
    tts.save(temp_file.name)
    return temp_file.name
|
| 37 |
|
| 38 |
+
# Streamlit UI
st.title("Voice-to-Voice Translator")

# Sidebar for user input
st.sidebar.header("Settings")
input_lang = st.sidebar.selectbox("Select Input Language", ["en", "fr", "es", "de", "hi"])
output_lang = st.sidebar.selectbox("Select Output Language", ["en", "fr", "es", "de", "hi"])

# Guard: there is no Helsinki-NLP opus-mt-{x}-{x} checkpoint, so a
# same-language pair would crash while downloading the model.
if input_lang == output_lang:
    st.warning("Please choose two different languages for input and output.")
    st.stop()

# Initialize models
# NOTE(review): whisper_model is loaded but never used below — transcription
# goes through speech_to_text() (Google web API). Confirm before removing.
whisper_model = load_whisper_model()
translation_pipeline = load_translation_model(input_lang, output_lang)

# Input options
st.header("Input your voice or text")

# Option to upload an audio file
audio_file = st.file_uploader("Upload an audio file (WAV format)", type=["wav"])

if audio_file is not None:
    try:
        # Convert speech to text
        input_text = speech_to_text(audio_file)
        st.write(f"Recognized Text: {input_text}")

        # Translate the text
        translated_text = translate_text(input_text, translation_pipeline)
        st.success(f"Translated Text: {translated_text}")

        # Convert translated text to speech, then read and delete the temp
        # MP3 (the original left the handle open and never removed the file).
        output_audio = text_to_speech(translated_text, output_lang)
        try:
            with open(output_audio, "rb") as audio_fh:
                audio_bytes = audio_fh.read()
        finally:
            os.remove(output_audio)

        # Play the audio
        st.audio(audio_bytes, format="audio/mp3")

        # Provide download link for audio
        b64 = base64.b64encode(audio_bytes).decode()
        href = f'<a href="data:audio/mp3;base64,{b64}" download="translated_audio.mp3">Download Translated Audio</a>'
        st.markdown(href, unsafe_allow_html=True)
    except Exception as e:
        # Surface recognition/translation/network failures in the UI instead
        # of a raw traceback (restores the try/except the earlier revision of
        # this file had around the same pipeline).
        st.error(f"Error: {e}")
else:
    st.write("Please upload an audio file to get started.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|