Spaces:
Sleeping
Sleeping
Upload 7 files
Browse files- .env +1 -0
- .gitignore +3 -0
- .streamlit/secrets.toml +1 -0
- README.md +0 -13
- app.py +131 -0
- packages.txt +3 -0
- requirements.txt +9 -0
.env
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
SAMBANOVA_API_KEY=<your-sambanova-api-key>
|
.gitignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
.streamlit/secrets.toml
|
| 3 |
+
secrets.toml
|
.streamlit/secrets.toml
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
SAMBANOVA_API_KEY = "<your-sambanova-api-key>"
|
README.md
CHANGED
|
@@ -1,13 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Comminication Ai
|
| 3 |
-
emoji: 👁
|
| 4 |
-
colorFrom: purple
|
| 5 |
-
colorTo: purple
|
| 6 |
-
sdk: streamlit
|
| 7 |
-
sdk_version: 1.40.1
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
license: apache-2.0
|
| 11 |
-
---
|
| 12 |
-
|
| 13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import tempfile

import whisper
from gtts import gTTS
from dotenv import load_dotenv
import openai
import streamlit as st

# Load environment variables from a local .env file (no-op on Streamlit Cloud,
# where configuration comes from st.secrets instead).
load_dotenv()


@st.cache_resource
def load_whisper_model():
    """Load and cache the Whisper "small" speech-to-text model.

    Decorated with st.cache_resource so the model is loaded once per
    process instead of on every Streamlit rerun.
    """
    return whisper.load_model("small")


whisper_model = load_whisper_model()

# ---------------------------------------------------------------- Streamlit UI
st.title("Conversational AI with Speech-to-Speech Response")
st.write("Record your voice or upload an audio file to start the process.")

# Sidebar Interaction Mode
interaction_mode = st.sidebar.selectbox(
    "Choose Interaction Mode:", ["Record Voice", "Upload Audio"]
)

# Path of the captured/uploaded audio; stays None until the user provides audio.
# (Explicit sentinel instead of the fragile `'temp_audio_path' in locals()` check.)
temp_audio_path = None

# ------------------------------------------- Record Voice via st.audio_input
if interaction_mode == "Record Voice":
    st.write("Use the audio recorder below to record your voice:")

    audio_data = st.audio_input("Record your voice")

    if audio_data:
        st.info("Recording received. Processing...")

        # Persist the recording so Whisper (which reads from a path) can use it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
            temp_audio.write(audio_data.getvalue())  # .getvalue() extracts raw bytes
            temp_audio_path = temp_audio.name

        # Play back the saved audio
        st.audio(temp_audio_path, format="audio/wav")
        st.success("Audio saved and ready for transcription!")

# --------------------------------------------------- Upload Audio Functionality
elif interaction_mode == "Upload Audio":
    uploaded_file = st.file_uploader("Upload your audio file (MP3/WAV)", type=["mp3", "wav"])

    if uploaded_file is not None:
        st.info("File uploaded. Saving...")

        # Keep the uploader's real extension so ffmpeg/Whisper sniff the right
        # container (previously WAV uploads were always saved as ".mp3").
        suffix = os.path.splitext(uploaded_file.name)[1] or ".mp3"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_audio:
            temp_audio.write(uploaded_file.read())
            temp_audio_path = temp_audio.name

        # Play back the uploaded audio
        st.audio(temp_audio_path, format="audio/mp3")
        st.success("Audio uploaded and ready for transcription!")

# ------------------------------------------------- Transcribe and Process Audio
if temp_audio_path:
    response_audio_path = "final_response.mp3"
    try:
        st.write("Processing the audio file for transcription...")

        with st.spinner("Transcribing audio..."):
            result = whisper_model.transcribe(temp_audio_path)
            user_text = result["text"]
            st.write("Transcribed Text:", user_text)
            st.success("Transcription complete!")

        # Generate AI Response
        st.write("Generating a conversational response...")

        with st.spinner("Generating response..."):
            # SECURITY NOTE(review): never commit the real key to the repo —
            # keep it only in Streamlit secrets / environment variables.
            client = openai.OpenAI(
                # Uncomment below if you want to use .env file for localhost
                # or other deployment:
                # api_key=os.environ.get("SAMBANOVA_API_KEY"),

                # for streamlit deployment
                api_key=st.secrets["SAMBANOVA_API_KEY"],
                base_url="https://api.sambanova.ai/v1",
            )

            response = client.chat.completions.create(
                model='Meta-Llama-3.1-8B-Instruct',
                messages=[
                    {"role": "system", "content": (
                        "You are a kind, empathetic, and intelligent assistant capable of meaningful conversations and emotional support. "
                        "Your primary goals are: "
                        "1. To engage in casual, friendly, and supportive conversations when the user seeks companionship or emotional relief. "
                        "2. To adapt your tone and responses to match the user's mood, providing warmth and encouragement if they seem distressed or seeking emotional support. "
                        "3. To answer questions accurately and provide explanations when asked, adjusting the depth and length of your answers based on the user's needs. "
                        "4. To maintain a positive and non-judgmental tone, offering helpful advice or lighthearted dialogue when appropriate. "
                        "5. To ensure the user feels heard, understood, and valued during every interaction. "
                        "If the user does not ask a question, keep the conversation engaging and meaningful by responding thoughtfully or with light humor where appropriate."
                    )},
                    {"role": "user", "content": user_text},
                ],
                temperature=0.1,
                top_p=0.1,
            )

            answer = response.choices[0].message.content
            st.write("Response:", answer)
            st.success("Response generated!")

        # Convert response text to speech using gTTS
        st.write("Converting the response to speech...")

        with st.spinner("Converting text to speech..."):
            tts = gTTS(text=answer, slow=False)
            tts.save(response_audio_path)
            st.success("Conversion complete!")

        # Play and download the response MP3.  Read the bytes up front so we
        # don't hand Streamlit an open file handle that never gets closed.
        st.audio(response_audio_path, format="audio/mp3")
        with open(response_audio_path, "rb") as f:
            response_audio_bytes = f.read()
        st.download_button(
            label="Download the Response",
            data=response_audio_bytes,
            file_name="final_response.mp3",
            mime="audio/mpeg",
        )
    finally:
        # Clean up temporary files even when transcription/LLM/TTS raises
        # (the original only removed them on the happy path).
        for path in (temp_audio_path, response_audio_path):
            if path and os.path.exists(path):
                os.remove(path)
|
packages.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
libportaudio2
|
| 2 |
+
python3-all-dev
|
| 3 |
+
ffmpeg
|
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
openai-whisper
|
| 2 |
+
gTTS
|
| 3 |
+
python-dotenv
|
| 4 |
+
openai
|
| 5 |
+
streamlit
|
| 6 |
+
sounddevice
|
| 7 |
+
numpy
|
| 8 |
+
torch
|
| 9 |
+
# NOTE: the ffmpeg *binary* is installed via packages.txt; the PyPI package named "ffmpeg" is unrelated and should not be pip-installed.
|