Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,6 +5,10 @@ import time
|
|
| 5 |
from transformers import pipeline
|
| 6 |
from pydub import AudioSegment
|
| 7 |
import speech_recognition as sr
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
# Streamlit Configuration
|
| 10 |
st.set_page_config(page_title="Interview Copilot", layout="wide")
|
|
@@ -63,39 +67,44 @@ if st.button("Start Interview"):
|
|
| 63 |
if 'responses' not in st.session_state:
|
| 64 |
st.session_state.responses = []
|
| 65 |
|
| 66 |
-
# Step 4: Question and Voice Response
|
| 67 |
current_question = all_questions[st.session_state.question_index]
|
| 68 |
st.write(f"**Question {st.session_state.question_index + 1}:** {current_question}")
|
| 69 |
-
st.write("🎤 Please record your answer
|
| 70 |
-
|
| 71 |
-
audio_file = st.file_uploader(f"Upload your answer to question {st.session_state.question_index + 1}:", type=["mp3", "wav", "ogg", "flac"])
|
| 72 |
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
audio
|
| 76 |
-
|
| 77 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
|
| 79 |
-
#
|
| 80 |
recognizer = sr.Recognizer()
|
| 81 |
-
with sr.AudioFile(audio_path) as source:
|
| 82 |
-
audio_data = recognizer.record(source)
|
| 83 |
try:
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
| 89 |
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
|
| 100 |
except sr.UnknownValueError:
|
| 101 |
st.error("Sorry, I couldn't understand the audio.")
|
|
|
|
# --- Hunk 1: top-of-file imports (dependencies for live audio capture) ---
from transformers import pipeline
from pydub import AudioSegment
import speech_recognition as sr
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, WebRtcMode
import numpy as np
import wave
import io

# Streamlit Configuration
st.set_page_config(page_title="Interview Copilot", layout="wide")

# --- Hunk 2: Step 4 — ask the current question and capture a live voice answer ---
if 'responses' not in st.session_state:
    st.session_state.responses = []

# Step 4: Question and Live Voice Response
current_question = all_questions[st.session_state.question_index]
st.write(f"**Question {st.session_state.question_index + 1}:** {current_question}")
st.write("🎤 Please record your answer using the microphone.")

# Live voice recording with streamlit-webrtc.
# FIX(review): the previous call passed audio_source=True / sendback_audio=True
# and video_transformer_factory=VideoTransformerBase — `audio_source` is not a
# webrtc_streamer parameter and the abstract VideoTransformerBase is not a
# usable factory; this is the likely cause of the build error. Audio-only
# capture is configured with SENDONLY mode plus media_stream_constraints.
webrtc_ctx = webrtc_streamer(
    key="audio-recorder",
    mode=WebRtcMode.SENDONLY,
    audio_receiver_size=1024,  # buffer incoming audio frames until we drain them
    media_stream_constraints={"video": False, "audio": True},
)

# Check if the user has recorded audio
if webrtc_ctx.audio_receiver:
    # Drain whatever audio frames have arrived so far.
    frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
    if frames:
        # FIX(review): sr.AudioFile needs a real WAV/AIFF/FLAC container, not
        # raw PCM bytes from BytesIO — wrap the samples in a WAV header using
        # the (previously unused) `wave` module.
        sample_rate = frames[0].sample_rate
        # NOTE(review): frames are flattened and written as mono int16;
        # assumes a single-channel capture stream — TODO confirm channel layout.
        pcm = np.concatenate(
            [frame.to_ndarray().astype(np.int16).reshape(-1) for frame in frames]
        )
        audio_file = io.BytesIO()
        with wave.open(audio_file, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)  # int16 -> 2 bytes per sample
            wf.setframerate(sample_rate)
            wf.writeframes(pcm.tobytes())
        audio_file.seek(0)  # rewind so sr.AudioFile reads from the start

        # Process the audio
        recognizer = sr.Recognizer()
        try:
            with sr.AudioFile(audio_file) as source:
                audio_data = recognizer.record(source)
            # Transcribe audio to text
            transcript = recognizer.recognize_google(audio_data)
            st.session_state.responses.append(transcript)
            st.write(f"📝 Transcribed Response: {transcript}")

            # Proceed to next question or complete interview
            if st.session_state.question_index < len(all_questions) - 1:
                st.session_state.question_index += 1
            else:
                st.session_state.question_index = 0  # Reset to first question after the last one

            # Show the next question or finish the interview
            if len(st.session_state.responses) < len(all_questions):
                st.experimental_rerun()
        except sr.UnknownValueError:
            st.error("Sorry, I couldn't understand the audio.")