Update app.py
app.py CHANGED
@@ -69,7 +69,7 @@ def save_message(role, content):
 # 💬 Display chat history
 def display_chat_history():
     messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
-    assistant_icon_html = "<img src='https
+    assistant_icon_html = "<img src='https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg' width='20' style='vertical-align:middle;'/>"
     for msg in list(messages)[::-1]:
         data = msg.to_dict()
         if data["role"] == "user":
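A side note on the query above: it streams messages in ascending `timestamp` order and reverses them client-side with `[::-1]`. The Firestore client can order server-side instead; a minimal sketch, assuming the same `db` client and `user_id` defined earlier in app.py:

    from google.cloud import firestore

    # Descending server-side order makes the [::-1] reversal unnecessary.
    messages = (
        db.collection("users")
          .document(user_id)
          .collection("messages")
          .order_by("timestamp", direction=firestore.Query.DESCENDING)
          .stream()
    )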
@@ -77,7 +77,7 @@ def display_chat_history():
         else:
             st.markdown(f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>", unsafe_allow_html=True)
 
-#
+# --- Main Chat UI ---
 input_col, clear_col = st.columns([9, 1])
 with input_col:
     user_input = st.chat_input("Type your message here...")
@@ -100,22 +100,24 @@ display_chat_history()
 if "mute_voice" not in st.session_state:
     st.session_state["mute_voice"] = False
 
-
-
-
+if "last_tts_text" not in st.session_state:
+    st.session_state["last_tts_text"] = ""
+
+def synthesize_voice(text, user_id):
+    audio_path = f"output_{user_id}.mp3"
+    # Only synthesize if text changed or file doesn't exist
+    if st.session_state["last_tts_text"] != text or not os.path.exists(audio_path):
         with st.spinner("Synthesizing voice with GPT-4o..."):
             speech_response = client.audio.speech.create(
-                model="tts-1",
-                voice="nova",
+                model="tts-1",
+                voice="nova",
                 input=text,
                 response_format="mp3"
             )
-        audio_path = f"output_{user_id}.mp3"
         with open(audio_path, "wb") as f:
             f.write(speech_response.content)
-
-
-        # os.remove(audio_path)
+        st.session_state["last_tts_text"] = text
+    return audio_path
 
 if user_input:
     # Send user message to OpenAI thread
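The new `synthesize_voice` helper wraps OpenAI's text-to-speech endpoint and skips the API call when the text has not changed since the last synthesis (note the spinner label mentions GPT-4o, but the model actually requested is `tts-1`). For reference, the underlying call can be exercised on its own; a minimal sketch, assuming `OPENAI_API_KEY` is set in the environment and using an illustrative output filename:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Same parameters the diff uses: the "tts-1" model with the "nova" voice.
    speech_response = client.audio.speech.create(
        model="tts-1",
        voice="nova",
        input="Hello from LORAIN!",
        response_format="mp3",
    )

    # Write the MP3 bytes to disk, as app.py does before playback.
    with open("speech_demo.mp3", "wb") as f:
        f.write(speech_response.content)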
@@ -135,20 +137,24 @@ if user_input:
     assistant_message = latest_response.content[0].text.value
     save_message("assistant", assistant_message)
 
-    #
-
-
-    if st.button("🔊 Play Voice", key="unmute"):
-        st.session_state["mute_voice"] = False
-        synthesize_and_play(assistant_message, False)
-    with col2:
-        if st.button("🔇 Mute Voice", key="mute"):
-            st.session_state["mute_voice"] = True
-            st.info("Voice output muted for this and future messages.")
+    # Voice autoplay unless muted
+    mute_voice = st.session_state.get("mute_voice", False)
+    audio_path = synthesize_voice(assistant_message, user_id) if not mute_voice else None
 
-
-
+    if not mute_voice and audio_path:
+        st.audio(audio_path, format="audio/mp3", autoplay=True)
+    elif mute_voice:
+        st.info("🔇 Voice is muted. To enable assistant speech, click 'Unmute Voice' below and ask another question.")
+
+    # Single mute/unmute button
+    if not mute_voice:
+        if st.button("🔇 Mute Voice"):
+            st.session_state["mute_voice"] = True
+            st.rerun()
+    else:
+        if st.button("🔊 Unmute Voice"):
+            st.session_state["mute_voice"] = False
+            st.rerun()
 
-    # Force Streamlit to rerun so chat refreshes and you get a new prompt
     time.sleep(0.5)
     st.rerun()
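The replacement mute/unmute flow leans on two Streamlit behaviors: `st.session_state` persists across `st.rerun()` calls, and `st.audio` can autoplay (the `autoplay` parameter requires Streamlit 1.33 or newer). A standalone sketch of the same toggle pattern, with a hypothetical `demo.mp3` file standing in for the synthesized audio:

    import streamlit as st

    if "mute_voice" not in st.session_state:
        st.session_state["mute_voice"] = False

    # A single button whose label always names the *next* action; flipping
    # the flag and rerunning immediately refreshes the label, as in the diff.
    muted = st.session_state["mute_voice"]
    if st.button("🔊 Unmute Voice" if muted else "🔇 Mute Voice"):
        st.session_state["mute_voice"] = not muted
        st.rerun()

    if not st.session_state["mute_voice"]:
        st.audio("demo.mp3", format="audio/mp3", autoplay=True)  # demo.mp3 is illustrative

Collapsing the two branches into one widget is a variation on the diff's approach: only one button exists per rerun, so no `key=` arguments are needed to avoid duplicate-widget errors.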
|