new UI and hold to speak feature
app.py CHANGED
@@ -1,57 +1,109 @@
 import streamlit as st
 import plotly.graph_objects as go
-from huggingface_hub import hf_hub_download
-from llama_cpp import Llama
 from faster_whisper import WhisperModel
-
-from audio_recorder_streamlit import audio_recorder
+import requests
 import json
-import json_repair
 import time
 import io
+from audio_recorder_streamlit import audio_recorder
+from PIL import Image

 # --- 1. CONFIGURATION ---
 st.set_page_config(page_title="SomAI", layout="wide", page_icon="🩺")

-#
-
-
-
-
-
-
-model_path = hf_hub_download(repo_id=repo_id, filename=filename)
-
-llm = Llama(
-    model_path=model_path,
-    n_ctx=1024,
-    n_threads=2,
-    n_batch=512,
-    verbose=False
-)
-llm("Hi", max_tokens=1)
+# The URL of your deployed FastAPI backend (Space 2: arshenoy/somAI-backend)
+# NOTE: Replace with the actual URL when deployed. For local testing, use http://localhost:7860
+# When deployed on HF Spaces, this may need to be the actual public URL or a service endpoint if using different Spaces.
+# Assuming the backend is hosted and accessible.
+BACKEND_API_URL = "https://<your-backend-space-name>.hf.space"
+# Use this for local testing:
+# BACKEND_API_URL = "http://localhost:7860"

-
+# --- 2. LOAD VOICE BRAIN (Keep Whisper on frontend for VTT) ---
+@st.cache_resource
+def load_whisper():
     print(">>> LOADING AUDIO SENSORS...")
-
-
-
+    # Use 'tiny' for faster performance on free tier, or 'medium' for better accuracy
+    # 'large-v3' is too slow for frontend VTT in a live chat.
+    whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
+    return whisper_model

 try:
-
+    whisper = load_whisper()
 except Exception as e:
-    st.error(f"
+    st.error(f"WHISPER FAILURE: {e}")
     st.stop()

-# --- 3.
-st.markdown("""
-
-
-
-
-.
-
-
+# --- 3. NEW NEON STYLE CSS ---
+st.markdown("""
+<style>
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&family=JetBrains+Mono:wght@400;700&display=swap');
+
+/* BASE THEME - Deep Space Black */
+.stApp {
+    background-color: #050505;
+    background-image: radial-gradient(circle at 50% 50%, #1a1a1a 0%, #000000 100%);
+    font-family: 'Inter', sans-serif;
+}
+
+/* GLASS SIDEBAR */
+section[data-testid="stSidebar"] {
+    background: rgba(10, 10, 10, 0.7);
+    backdrop-filter: blur(12px);
+    border-right: 1px solid rgba(255, 255, 255, 0.08);
+}
+
+/* NEON METRICS */
+div[data-testid="metric-container"] {
+    background: rgba(255, 255, 255, 0.03);
+    border: 1px solid rgba(255, 255, 255, 0.1);
+    border-radius: 12px;
+    padding: 15px;
+    transition: 0.3s;
+}
+div[data-testid="metric-container"]:hover {
+    border-color: #00ff80;
+    box-shadow: 0 0 15px rgba(0, 255, 128, 0.1);
+}
+
+/* TEXT GLOW */
+h1, h2, h3 {
+    font-family: 'JetBrains Mono', monospace;
+    letter-spacing: -0.5px;
+    color: #fff;
+    text-shadow: 0 0 10px rgba(255, 255, 255, 0.2);
+}
+
+/* CHAT BUBBLES - Updated for new dark background */
+.chat-bubble {
+    padding: 12px 16px;
+    border-radius: 8px;
+    margin-bottom: 10px;
+    line-height: 1.5;
+    font-family: 'Inter', sans-serif;
+    font-size: 16px;
+    color: #e0e0e0;
+}
+.user-bubble {
+    background-color: #004d26; /* Darker green for user */
+    margin-left: 20%;
+    border-radius: 12px 12px 0 12px;
+    text-align: right;
+}
+.ai-bubble {
+    background-color: #1a1a1a; /* Dark gray for AI */
+    border: 1px solid rgba(255, 255, 255, 0.1);
+    margin-right: 20%;
+    border-radius: 12px 12px 12px 0;
+    text-align: left;
+}
+
+/* REMOVE JUNK */
+#MainMenu {visibility: hidden;}
+footer {visibility: hidden;}
+</style>
+""", unsafe_allow_html=True)
+

 # --- 4. SESSION STATE ---
 if 'history' not in st.session_state: st.session_state.history = []
@@ -61,129 +113,198 @@ if 'mode' not in st.session_state: st.session_state.mode = "GENERAL"

 # --- 5. SIDEBAR ---
 with st.sidebar:
-    st.markdown("### Patient Intake & Vitals")
+    st.markdown("### 🧬 Patient Intake & Vitals")
     with st.expander("Patient Profile", expanded=True):
         name = st.text_input("Name", "Patient X")
         age = st.slider("Age", 18, 90, 20)
-
+        condition = st.text_input("Primary Condition", "Diabetes/Hypertension")
+
     with st.expander("Clinical Vitals", expanded=True):
         bp = st.number_input("Systolic BP", 90, 220, 110)
         glucose = st.number_input("Glucose", 70, 400, 110)
-        sleep = st.slider("Sleep", 0, 10, 4)
-        meds = st.slider("Missed Doses", 0, 7, 3)
+        sleep = st.slider("Sleep Quality (0-10)", 0, 10, 4)
+        meds = st.slider("Missed Doses (Last 7 Days)", 0, 7, 3)

     if st.button("RUN CLINICAL ANALYSIS", type="primary", use_container_width=True):
-        with st.spinner("
-            score = 10
-            if bp > 140: score += 20
-            if glucose > 180: score += 20
-            if sleep < 5: score += 15
-            if meds > 2: score += 20
-            score = min(score, 100)
-
-            sys = "Medical Analyst. Output strictly valid JSON."
-            user_data = f"Age: {age}, BP: {bp}, Glucose: {glucose}, Risk: {score}/100."
-            task = """Task: 1. Risk Summary. 2. 3 Actions. Format: {"summary": "string", "actions": ["a", "b", "c"]}"""
-            prompt = f"<|system|>{sys}<|end|><|user|>{user_data}\n{task}<|end|><|assistant|>"
-
-            output = llm(prompt, max_tokens=200, temperature=0.1, stop=["<|end|>", "###"])
+        with st.spinner("Analyzing Clinical Markers..."):
             try:
-
-
-
-
-
-
+                # API CALL TO /analyze ENDPOINT
+                payload = {
+                    "age": age,
+                    "condition": condition,
+                    "sleep_quality": sleep,
+                    "missed_doses": meds,
+                    "systolic_bp": bp,
+                    "glucose": glucose
+                }
+                response = requests.post(f"{BACKEND_API_URL}/analyze", json=payload, timeout=30)
+                response.raise_for_status()  # Raises an HTTPError for bad responses (4xx or 5xx)
+
+                data = response.json()
+                st.session_state.risk_score = data['numeric_score']
+                st.session_state.risk_summary = data['risk_summary']
+
+            except requests.exceptions.RequestException as req_err:
+                st.error(f"API Error: Cannot connect to backend (Code: {req_err.response.status_code if hasattr(req_err, 'response') and req_err.response else 'N/A'}). Ensure Space 2 is running.")
+                st.session_state.risk_score = 0
+                st.session_state.risk_summary = "Backend service unavailable."
+            except Exception as e:
+                st.error(f"Analysis Failed: {e}")
+                st.session_state.risk_score = 0
+                st.session_state.risk_summary = "Processing error."
+

     val = st.session_state.risk_score
-    color = "#
-
-
+    color = "#00ff80" if val < 40 else "#ffc300" if val < 80 else "#ff3300" # Neon color scheme
+
+    # Gauge Chart
+    fig = go.Figure(go.Indicator(
+        mode="gauge+number",
+        value=val,
+        number={'font': {'size': 40, 'color': 'white'}},
+        gauge={
+            'axis': {'range': [0, 100], 'tickcolor': '#333333'},
+            'bar': {'color': color},
+            'bgcolor': "rgba(26, 26, 26, 0.7)",
+            'bordercolor': "#333333",
+            'steps': [
+                {'range': [0, 40], 'color': 'rgba(0, 255, 128, 0.1)'},
+                {'range': [40, 80], 'color': 'rgba(255, 195, 0, 0.1)'},
+                {'range': [80, 100], 'color': 'rgba(255, 51, 0, 0.1)'},
+            ]
+        }
+    ))
+    fig.update_layout(
+        height=250,
+        margin=dict(l=10,r=10,t=30,b=10),
+        paper_bgcolor="rgba(0,0,0,0)",
+        font={'color': 'white', 'family': 'JetBrains Mono'}
+    )
     st.plotly_chart(fig, use_container_width=True)
-
+
+    # Custom Metric Card with Border (replaces standard metric-card)
+    st.markdown(f"""
+    <div style="background: rgba(255, 255, 255, 0.05); border: 1px solid rgba(255, 255, 255, 0.1); border-radius: 12px; padding: 15px; margin-top: 15px; border-left: 4px solid {color};">
+        <h5 style="margin:0; color: {color}; font-family: 'JetBrains Mono', monospace;">CLINICAL ASSESSMENT</h5>
+        <p style="margin-top:5px; font-size: 0.9rem; color: #ddd;">{st.session_state.risk_summary}</p>
+    </div>
+    """, unsafe_allow_html=True)

 # --- 6. MAIN CHAT ---
 col_main = st.columns(1)[0]
 with col_main:
-    st.markdown("## SomAI Assistant")
+    st.markdown("## 🧠 SomAI Assistant")
     c1, c2 = st.columns(2)
-    if c1.button("Medical Guide", use_container_width=True, type="primary" if st.session_state.mode == "GENERAL" else "secondary"): st.session_state.mode = "GENERAL"
-    if c2.button("Therapist", use_container_width=True, type="primary" if st.session_state.mode == "THERAPY" else "secondary"): st.session_state.mode = "THERAPY"
-
+    if c1.button("🩺 Medical Guide", use_container_width=True, type="primary" if st.session_state.mode == "GENERAL" else "secondary"): st.session_state.mode = "GENERAL"
+    if c2.button("🫂 Therapist", use_container_width=True, type="primary" if st.session_state.mode == "THERAPY" else "secondary"): st.session_state.mode = "THERAPY"
+
+    # Display History
     chat_container = st.container(height=400)
     for msg in st.session_state.history:
         div_class = "user-bubble" if msg['role'] == "user" else "ai-bubble"
         chat_container.markdown(f"<div class='chat-bubble {div_class}'>{msg['content']}</div>", unsafe_allow_html=True)
-
-    # --- ROBUST AUDIO INPUT (
-    st.
+
+    # --- ROBUST AUDIO INPUT (Hold and Speak) ---
+    st.markdown("---")
+    st.markdown("🎙️ **Hold & Speak:**")
     audio_bytes = audio_recorder(
         text="",
-        recording_color="#
-        neutral_color="#
+        recording_color="#ff3300", # Neon Red for recording
+        neutral_color="#00ff80", # Neon Green for neutral
         icon_name="microphone",
-        icon_size="
+        icon_size="3x",
+        initial_time=0 # Force 'hold-to-speak' mode
     )

     user_query = None

     # 1. VOICE PROCESSING
     if audio_bytes:
-
-        with st.spinner("Processing Voice..."):
+        with st.spinner("🔊 Transcribing Voice..."):
             audio_file = io.BytesIO(audio_bytes)
-            #
-            #
-            segments, info = whisper.transcribe(audio_file, beam_size=
+            # Transcribe the audio
+            # Using a single file-like object directly with WhisperModel
+            segments, info = whisper.transcribe(audio_file, beam_size=5)
             text_list = [segment.text for segment in segments]
-            user_query = " ".join(text_list)
+            user_query = " ".join(text_list).strip()
+            # If transcription is empty, handle gracefully
+            if not user_query:
+                st.warning("Could not detect speech. Please speak clearly.")
+                st.stop()
+            st.session_state.history.append({"role": "user", "content": user_query})
+            chat_container.markdown(f"<div class='chat-bubble user-bubble'>{user_query}</div>", unsafe_allow_html=True)

     # 2. TEXT PROCESSING
     text_input = st.chat_input("...or type a message")
     if text_input:
         user_query = text_input
-
-    # 3. QUERY LOGIC
-    if user_query:
         st.session_state.history.append({"role": "user", "content": user_query})
         chat_container.markdown(f"<div class='chat-bubble user-bubble'>{user_query}</div>", unsafe_allow_html=True)
+
+    # 3. QUERY LOGIC (API Call)
+    if user_query:
+
+        # --- LLM Response Generation (Streaming Emulation) ---
+        placeholder = chat_container.empty()
+        full_resp = ""
+
+        with placeholder.container():
+            with st.spinner("Thinking..."):
+                try:
+                    # API Call to /generate ENDPOINT
+                    payload = {
+                        "query": user_query,
+                        "age": age,
+                        "condition": condition,
+                        "mode": st.session_state.mode
+                    }
+
+                    # Using Stream=False for FastAPI/requests.post since the current FastAPI code doesn't support streaming.
+                    # We will stream the *display* locally to emulate the effect.
+                    response = requests.post(f"{BACKEND_API_URL}/generate", json=payload, timeout=60)
+                    response.raise_for_status()
+
+                    data = response.json()
+                    raw_text = data['generated_text']
+
+                    # Stream display emulation: Chunk the text and write it
+                    # This simulates streaming visually even if the API is non-streaming.
+                    chunk_size = 5 # words per chunk
+                    words = raw_text.split()
+
+                    for i in range(0, len(words), chunk_size):
+                        chunk = " ".join(words[i:i + chunk_size])
+                        full_resp += chunk + " "
+
+                        # Use an empty container to display the response with a cursor effect
+                        # and then replace it with the next chunk
+                        placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{full_resp}▌</div>", unsafe_allow_html=True)
+                        time.sleep(0.05) # Adjust for speed
+
+                    # Final display (no cursor)
+                    placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{raw_text}</div>", unsafe_allow_html=True)
+
+                    st.session_state.history.append({"role": "assistant", "content": raw_text})
+
+                    # Display suggestions below the chat container
+                    suggestions = data.get('suggestions', [])
+                    if suggestions:
+                        st.markdown("---")
+                        st.markdown("💡 **Next Steps:**")
+                        suggestion_cols = st.columns(len(suggestions))
+                        for i, sug in enumerate(suggestions):
+                            suggestion_cols[i].button(sug, key=f"sug_{i}_{len(st.session_state.history)}", use_container_width=True)

-
-
-
-
-
-
-
-
-
-
-        st.stop()
-
-        if prompt_lower in ["hi", "hello", "hey"]:
-            resp = "Hello! I am SomAI. Describe your symptoms."
-            st.session_state.history.append({"role": "assistant", "content": resp})
-            chat_container.markdown(f"<div class='chat-bubble ai-bubble'>{resp}</div>", unsafe_allow_html=True)
-            st.stop()
-
-        if any(x in prompt_lower for x in ["die", "suicide", "kill"]):
-            resp = "🚨 Safety Protocol Engaged. Contact 988."
-            st.session_state.history.append({"role": "assistant", "content": resp})
-            chat_container.markdown(f"<div class='chat-bubble ai-bubble' style='border-color:red'>{resp}</div>", unsafe_allow_html=True)
-            st.stop()
-
-        # REAL AI
-        sys = "Medical Advisor. Ask 1 follow-up. Be concise." if st.session_state.mode == "GENERAL" else "Therapist. Be supportive."
-        full_prompt = f"<|system|>{sys}<|end|><|user|>{user_query}<|end|><|assistant|>"
-
-        with chat_container:
-            placeholder = st.empty()
-            full_resp = ""
-            stream = llm(full_prompt, max_tokens=300, stream=True, stop=["<|end|>", "###"])
-            for chunk in stream:
-                txt = chunk['choices'][0]['text']
-                full_resp += txt
-                placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{full_resp}▌</div>", unsafe_allow_html=True)
-            placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{full_resp}</div>", unsafe_allow_html=True)
+                except requests.exceptions.RequestException as req_err:
+                    error_msg = f"API Error: {req_err}. Check backend service health."
+                    st.error(error_msg)
+                    st.session_state.history.append({"role": "assistant", "content": error_msg})
+                    placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{error_msg}</div>", unsafe_allow_html=True)
+                except Exception as e:
+                    error_msg = f"LLM Generation Failed: {e}"
+                    st.error(error_msg)
+                    st.session_state.history.append({"role": "assistant", "content": error_msg})
+                    placeholder.markdown(f"<div class='chat-bubble ai-bubble'>{error_msg}</div>", unsafe_allow_html=True)

-
+        # Manually rerun to clear the input box and update history
+        st.rerun()
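
For reference, below is a minimal sketch of the backend contract this frontend now assumes (the "Space 2" FastAPI service the configuration comments refer to). Only the routes (/analyze, /generate) and the request/response field names (age, condition, sleep_quality, missed_doses, systolic_bp, glucose, numeric_score, risk_summary, query, mode, generated_text, suggestions) are taken from the calls in the diff above; the FastAPI framing and the placeholder logic are assumptions, not the actual backend code. The stand-in risk scoring simply reuses the thresholds the old in-app logic applied before this commit moved analysis server-side.

# Hypothetical sketch of the Space 2 backend contract (not the actual backend code).
# Routes and field names match the frontend calls above; the logic is a placeholder.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class AnalyzeRequest(BaseModel):
    age: int
    condition: str
    sleep_quality: int
    missed_doses: int
    systolic_bp: int
    glucose: int

class GenerateRequest(BaseModel):
    query: str
    age: int
    condition: str
    mode: str  # "GENERAL" or "THERAPY"

@app.post("/analyze")
def analyze(req: AnalyzeRequest):
    # Placeholder heuristic reusing the old frontend thresholds; the real
    # backend computes its own score and summary.
    score = 10
    if req.systolic_bp > 140: score += 20
    if req.glucose > 180: score += 20
    if req.sleep_quality < 5: score += 15
    if req.missed_doses > 2: score += 20
    score = min(score, 100)
    return {
        "numeric_score": score,  # consumed as st.session_state.risk_score
        "risk_summary": f"Estimated risk {score}/100 for {req.condition}.",
    }

@app.post("/generate")
def generate(req: GenerateRequest):
    # Placeholder reply; the real backend runs its LLM here.
    return {
        "generated_text": f"[{req.mode}] You said: {req.query}",
        "suggestions": ["Log today's vitals", "Review medication schedule", "Book a follow-up"],
    }

Run it locally with something like `uvicorn main:app --port 7860` (module name assumed) and point BACKEND_API_URL at http://localhost:7860 as the configuration comment suggests; a quick smoke test is `requests.post("http://localhost:7860/analyze", json={"age": 20, "condition": "Diabetes", "sleep_quality": 4, "missed_doses": 3, "systolic_bp": 110, "glucose": 110}).json()`.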
|