Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import streamlit as st
|
|
| 2 |
import difflib
|
| 3 |
import requests
|
| 4 |
import datetime
|
|
|
|
| 5 |
|
| 6 |
# --- CONFIG ---
|
| 7 |
GROQ_API_KEY = st.secrets.get('GROQ_API_KEY', 'YOUR_GROQ_API_KEY')
|
|
@@ -162,7 +163,7 @@ elif page == "Semantic Search":
|
|
| 162 |
st.caption("Example questions:")
|
| 163 |
st.write(", ".join(EXAMPLE_QUESTIONS))
|
| 164 |
|
| 165 |
- # ---
|
| 166 |
st.markdown("""
|
| 167 |
<style>
|
| 168 |
.input-mic-container {
|
|
@@ -211,12 +212,7 @@ elif page == "Semantic Search":
|
|
| 211 |
recognition.onresult = function(event) {
|
| 212 |
const transcript = event.results[0][0].transcript;
|
| 213 |
input.value = transcript;
|
| 214 |
-
|
| 215 |
- const streamlitInput = window.parent.document.querySelector('input[data-testid="stTextInput"]');
|
| 216 |
- if (streamlitInput) {
|
| 217 |
- streamlitInput.value = transcript;
|
| 218 |
- streamlitInput.dispatchEvent(new Event('input', { bubbles: true }));
|
| 219 |
- }
|
| 220 |
micBtn.textContent = '🎤';
|
| 221 |
};
|
| 222 |
recognition.onerror = function() {
|
|
@@ -229,11 +225,14 @@ elif page == "Semantic Search":
|
|
| 229 |
micBtn.disabled = true;
|
| 230 |
micBtn.title = 'Voice not supported';
|
| 231 |
}
|
|
|
|
|
|
|
|
|
|
| 232 |
</script>
|
| 233 |
""", unsafe_allow_html=True)
|
| 234 |
|
| 235 |
- #
|
| 236 |
- question =
|
| 237 |
|
| 238 |
if st.button("Run Semantic Search"):
|
| 239 |
if not code_input.strip() or not question.strip():
|
|
|
|
| 2 |
import difflib
|
| 3 |
import requests
|
| 4 |
import datetime
|
| 5 |
+ from streamlit_js_eval import streamlit_js_eval
|
| 6 |
|
| 7 |
# --- CONFIG ---
|
| 8 |
GROQ_API_KEY = st.secrets.get('GROQ_API_KEY', 'YOUR_GROQ_API_KEY')
|
|
|
|
| 163 |
st.caption("Example questions:")
|
| 164 |
st.write(", ".join(EXAMPLE_QUESTIONS))
|
| 165 |
|
| 166 |
+ # --- Single input with mic button using streamlit_js_eval ---
|
| 167 |
st.markdown("""
|
| 168 |
<style>
|
| 169 |
.input-mic-container {
|
|
|
|
| 212 |
recognition.onresult = function(event) {
|
| 213 |
const transcript = event.results[0][0].transcript;
|
| 214 |
input.value = transcript;
|
| 215 |
+ window.dispatchEvent(new CustomEvent("streamlit_js_eval_result", {detail: transcript}));
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 216 |
micBtn.textContent = '🎤';
|
| 217 |
};
|
| 218 |
recognition.onerror = function() {
|
|
|
|
| 225 |
micBtn.disabled = true;
|
| 226 |
micBtn.title = 'Voice not supported';
|
| 227 |
}
|
| 228 |
+ input.onchange = function() {
|
| 229 |
+ window.dispatchEvent(new CustomEvent("streamlit_js_eval_result", {detail: input.value}));
|
| 230 |
+ }
|
| 231 |
</script>
|
| 232 |
""", unsafe_allow_html=True)
|
| 233 |
|
| 234 |
+ # Get the value from JS (typed or spoken)
|
| 235 |
+ question = streamlit_js_eval(js_expressions="document.getElementById('questionInput') ? document.getElementById('questionInput').value : ''", key="js_question")
|
| 236 |
|
| 237 |
if st.button("Run Semantic Search"):
|
| 238 |
if not code_input.strip() or not question.strip():
|