Update app.py
app.py CHANGED
@@ -1,11 +1,11 @@
 import streamlit as st
-from streamlit_webrtc import webrtc_streamer, WebRtcMode
-import av
-import wave
 import requests
-import io
 import numpy as np
+import sounddevice as sd
+import wave
+import io

+# ✅ Set page title and layout
 st.set_page_config(page_title="Sai Vahini AI Assistant", layout="centered")

 # ✅ Render API URL (Ensure this matches your deployed API on Render)
@@ -14,74 +14,16 @@ RENDER_API_URL = "https://saivahini.onrender.com/process_audio"
 # ✅ UI Header
 st.markdown("<h1 style='text-align: center; color: #ff5733;'>Sai Vahini AI Voice Assistant 🎙️</h1>", unsafe_allow_html=True)

-# ✅ Audio frame callback
-def audio_frame_callback(frame):
-    audio = frame.to_ndarray(format="s16le")
-    audio_bytes = audio.tobytes()
-    if "frames" not in st.session_state:
-        st.session_state.frames = []
-    st.session_state.frames.append(audio_bytes)
-    return av.AudioFrame.from_ndarray(audio, format="s16", layout="mono")
-
-# ✅ WebRTC Streamer for recording
-st.write("🎤 **Click below to start speaking...**")
-webrtc_streamer(
-    key="audio-recorder",
-    mode=WebRtcMode.SENDRECV,
-    audio_frame_callback=audio_frame_callback,
-    media_stream_constraints={"audio": True, "video": False},
-)
-
-# ✅ Check if audio frames exist
-if "frames" not in st.session_state or not st.session_state.frames:
-    st.warning("⚠️ No audio recorded. Click the button above to start recording.")
-
-# ✅ Process Button
-if st.button("✅ Process Recorded Audio"):
-    if "frames" in st.session_state and st.session_state.frames:
-        with st.spinner("🔄 Processing your voice..."):
-            try:
-                # ✅ Convert recorded audio frames into WAV format
-                audio_bytes = io.BytesIO()
-                with wave.open(audio_bytes, "wb") as wf:
-                    wf.setnchannels(1)
-                    wf.setsampwidth(2)
-                    wf.setframerate(16000)
-                    wf.writeframes(b''.join(st.session_state.frames))
-
-                audio_bytes.seek(0)  # Reset buffer pointer
-
-                # ✅ Send recorded audio to Render API
-                response = requests.post(RENDER_API_URL, files={"file": ("audio.wav", audio_bytes, "audio/wav")})
-
-                # ✅ Handle API response
-                if response.status_code == 200:
-                    result = response.json()
-                    st.success("✅ AI Response:")
-                    st.write("📝 **Transcription:**", result.get("transcription", "No transcription"))
-                    st.write("🤖 **Answer:**", result.get("response", "No response found."))
-
-                    # ✅ Fetch and play AI-generated voice response
-                    audio_response_url = result.get("audio")
-                    if audio_response_url:
-                        st.write(f"🔊 **AI-generated voice response:**")
-                        audio_response = requests.get(audio_response_url)
-                        if audio_response.status_code == 200:
-                            st.audio(audio_response.content, format="audio/wav")
-                        else:
-                            st.error(f"❌ Failed to load AI audio ({audio_response.status_code})")
-                    else:
-                        st.warning("⚠️ No audio response received from API.")
-
-                    # ✅ Clear session state for new recording
-                    st.session_state.frames = []
-
-                else:
-                    st.error(f"❌ API Error: {response.status_code} - {response.text}")
-
+# ✅ Audio recording parameters
+DURATION = 5  # Seconds
+SAMPLE_RATE = 16000
+
+# ✅ Function to record audio
+def record_audio():
+    st.info("🎤 Recording... Speak now!")
+    audio = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=1, dtype=np.int16)
+    sd.wait()  # Wait until recording is finished
+    st.success("✅ Recording completed!")
+
+    # ✅ Save the audio as a WAV file
+    audio_bytes = io.BytesIO()
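The hunk above ends partway through record_audio(), so the rest of the rewritten file is not visible in this diff. Purely as a sketch of where the removed code suggests it is heading, the continuation below writes the recorded int16 samples into an in-memory 16 kHz mono WAV and posts it to RENDER_API_URL, then displays the transcription and response fields from the JSON reply. The button label and the exact UI wiring are assumptions for illustration, not the actual contents of app.py.

# Hypothetical continuation, modeled on the removed WAV/requests code above.
import io
import wave

import numpy as np
import requests
import sounddevice as sd
import streamlit as st

RENDER_API_URL = "https://saivahini.onrender.com/process_audio"
DURATION = 5         # Seconds
SAMPLE_RATE = 16000  # Hz

def record_audio():
    st.info("🎤 Recording... Speak now!")
    audio = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=1, dtype=np.int16)
    sd.wait()  # Block until the recording is finished
    st.success("✅ Recording completed!")

    # Pack the int16 samples into an in-memory mono 16-bit WAV file
    audio_bytes = io.BytesIO()
    with wave.open(audio_bytes, "wb") as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)  # 2 bytes per sample for int16
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(audio.tobytes())
    audio_bytes.seek(0)
    return audio_bytes

# Assumed UI wiring: record on a button press, then send the WAV to the API
if st.button("🎤 Record and process"):
    wav_buffer = record_audio()
    response = requests.post(RENDER_API_URL, files={"file": ("audio.wav", wav_buffer, "audio/wav")})
    if response.status_code == 200:
        result = response.json()
        st.write("📝 Transcription:", result.get("transcription", "No transcription"))
        st.write("🤖 Answer:", result.get("response", "No response found."))
    else:
        st.error(f"❌ API Error: {response.status_code} - {response.text}")

One design note: sounddevice records from the microphone of the machine running the Streamlit process, whereas the streamlit_webrtc version it replaces captured audio in the visitor's browser, so this approach fits local runs of the app.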