Update app.py
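Reworks the voice Q&A flow in app.py. The previous interface (old lines 818-996) is disabled by enclosing it in a module-level triple-quoted string, and a phased replacement is added: generate and play the question audio via asyncio.run(generate_question_audio(...)), give the user 10 seconds to press "Start Recording", capture the answer through st.audio_input within a 15-second window, transcribe it with the SpeechRecognition Google recognizer, and let the user review the clip before confirming. A timeout at any phase records "[No response]" and advances; after the last question, evaluate_answers() runs and the summary view is shown.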
app.py CHANGED
@@ -818,6 +818,7 @@ def get_ice_servers():
 
 
 # === Main QA Interface ===
+"""
 if st.session_state["generated_questions"]:
     idx = st.session_state["current_question_index"]
     if idx < len(st.session_state["generated_questions"]):
@@ -904,7 +905,7 @@ if st.session_state["generated_questions"]:
             st.session_state["record_phase"] = "listening"
             st.success("✅ Audio uploaded. You may now confirm your answer.")
             #st.audio(wav_path, format="audio/wav")
-""
+""
             if st.button("⏹️ Confirm & Next"):
                 try:
                     with st.spinner("🧠 Transcribing your answer..."):
@@ -916,7 +917,7 @@ if st.session_state["generated_questions"]:
                 except Exception as e:
                     st.error(f"❌ Transcription error: {e}")
                     transcript = "[Transcription error]"
-""
+""
 
                 st.session_state["answers"].append({
                     "question": question,
@@ -991,9 +992,165 @@ if st.session_state["generated_questions"]:
                evaluate_answers()
                st.session_state["show_summary"] = True
                st.rerun()
+"""
 
-
-#
+if st.session_state.get("generated_questions"):
+    idx = st.session_state.get("current_question_index", 0)
+    if idx < len(st.session_state["generated_questions"]):
+        question = st.session_state["generated_questions"][idx].lstrip("1234567890. ").strip()
+
+        # Phase 0: Generate & play question audio
+        if not st.session_state.get("question_played"):
+            st.session_state["question_audio_file"] = asyncio.run(generate_question_audio(question))
+            st.session_state.update({
+                "question_played": True,
+                "question_start_time": time.time(),
+                "record_phase": "audio_playing",
+                "recorded_text": "",
+                "response_file": None
+            })
+        st.markdown(f"**Q{idx + 1}:** {question}")
+        st.audio(st.session_state["question_audio_file"], format="audio/mp3")
+
+        now = time.time()
+        elapsed = now - st.session_state.get("question_start_time", 0)
+
+        # Phase 1: Audio Playing
+        if st.session_state["record_phase"] == "audio_playing":
+            if elapsed < 5:
+                st.markdown("<h4 class='timer-text'>🔊 Playing question audio... Please listen</h4>", unsafe_allow_html=True)
+                time.sleep(1)
+                st.rerun()
+            else:
+                st.session_state["record_phase"] = "waiting_to_start"
+                st.session_state["question_start_time"] = time.time()
+                st.rerun()
+        # Phase 2: Waiting to Start Recording
+        elif st.session_state["record_phase"] == "waiting_to_start":
+            remaining = 10 - int(elapsed)
+            if remaining > 0:
+                st.markdown(f"<h4 class='timer-text'>⏳ {remaining} seconds to click 'Start Recording'...</h4>", unsafe_allow_html=True)
+                if st.button("🎙️ Start Recording"):
+                    st.session_state.update({
+                        "record_phase": "recording",
+                        "timer_start": time.time(),
+                        "recording_started": True,
+                        "response_file": None
+                    })
+                    st.rerun()
+                time.sleep(1)
+                st.rerun()
+            else:
+                st.markdown("<div style='padding:10px; background:#fff8e1; border-left:5px solid orange;color: #212529;'>⚠️ <strong>No action detected.</strong> Automatically skipping to next question...</div>", unsafe_allow_html=True)
+                st.session_state["answers"].append({"question": question, "response": "[No response]"})
+                st.session_state.update({
+                    "record_phase": "idle",
+                    "question_played": False,
+                    "question_start_time": 0.0,
+                    "current_question_index": idx + 1
+                })
+                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
+                    evaluate_answers()
+                    st.session_state["show_summary"] = True
+                st.rerun()
+        # Phase 3: Recording
+        elif st.session_state["record_phase"] == "recording":
+            remaining = 15 - int(now - st.session_state.get("timer_start", 0))
+            if remaining > 0:
+                st.markdown(f"<h4 class='timer-text'>🎙️ {remaining} seconds to answer...</h4>", unsafe_allow_html=True)
+
+                audio_value = st.audio_input("🎤 Tap to record — then stop when done", key=f"audio_{idx}")
+
+                # Defer processing by saving to temp_audio
+                if audio_value:
+                    st.session_state["temp_audio"] = audio_value
+                    st.rerun()
+
+                # Process the deferred audio input
+                if "temp_audio" in st.session_state and st.session_state.get("response_file") is None:
+                    audio_value = st.session_state.pop("temp_audio")
+                    wav_path = f"response_{idx}.wav"
+                    with open(wav_path, "wb") as f:
+                        f.write(audio_value.getbuffer())
+
+                    recognizer = sr.Recognizer()
+
+                    try:
+                        with sr.AudioFile(wav_path) as source:
+                            audio = recognizer.record(source)
+                        transcript = recognizer.recognize_google(audio)
+                    except sr.UnknownValueError:
+                        transcript = "[Could not understand audio]"
+                    except sr.RequestError:
+                        transcript = "[Google API error]"
+                    except Exception as e:
+                        transcript = f"[Transcription failed: {e}]"
+
+                    st.session_state["response_file"] = wav_path
+                    st.session_state["record_phase"] = "listening"
+                    st.success("✅ Audio uploaded. You may now confirm your answer.")
+
+                    st.session_state["answers"].append({
+                        "question": question,
+                        "response_file": wav_path,
+                        "response": transcript
+                    })
+
+                    if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
+                        evaluate_answers()
+                        st.session_state["show_summary"] = True
+
+                    st.rerun()
+
+                if elapsed > 15 and st.session_state.get("response_file") is None:
+                    st.warning("⚠️ No audio captured. Moving to next question.")
+                    st.session_state["answers"].append({"question": question, "response": "[No response]"})
+                    st.session_state.update({
+                        "record_phase": "idle",
+                        "question_played": False,
+                        "current_question_index": idx + 1
+                    })
+                    if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
+                        evaluate_answers()
+                        st.session_state["show_summary"] = True
+                    st.rerun()
+
+            else:
+                st.markdown("<div style='padding:10px; background:#fff3e0; border-left:5px solid orange;'>⚠️ <strong>No response detected.</strong> Moving to next question...</div>", unsafe_allow_html=True)
+                st.session_state["answers"].append({"question": question, "response": "[No response]"})
+                st.session_state.update({
+                    "record_phase": "idle",
+                    "recording_started": False,
+                    "question_played": False,
+                    "question_start_time": 0.0,
+                    "current_question_index": idx + 1
+                })
+                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
+                    evaluate_answers()
+                    st.session_state["show_summary"] = True
+                st.rerun()
+
+        # Phase 4: Listening / Confirming
+        elif st.session_state["record_phase"] == "listening":
+            st.success("🎧 Review your recorded response below:")
+            st.audio(st.session_state["response_file"], format="audio/wav")
+
+            if st.button("⏹️ Confirm & Next"):
+                st.session_state.update({
+                    "record_phase": "idle",
+                    "recording_started": False,
+                    "question_played": False,
+                    "question_start_time": 0.0,
+                    "current_question_index": idx + 1,
+                    "response_file": None,
+                    "audio_waiting": True
+                })
+
+                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
+                    evaluate_answers()
+                    st.session_state["show_summary"] = True
+                st.rerun()
+
 
 # === Summary Display ===
 if st.session_state.get("show_summary", False):
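The added block is built around one Streamlit idiom used in every phase: keep the current phase name and a start timestamp in st.session_state, redraw once per second with time.sleep(1) followed by st.rerun(), and advance the state machine on whichever comes first, a button press or a timeout. Below is a minimal, self-contained sketch of that idiom; the phase names and the 10-second window mirror the diff, but the script itself (phase_sketch.py) is illustrative, not code from app.py.

# phase_sketch.py: illustrative only; run with: streamlit run phase_sketch.py
import time

import streamlit as st

# Seed the state machine once per browser session.
if "phase" not in st.session_state:
    st.session_state.update({"phase": "waiting_to_start", "phase_start": time.time()})

elapsed = time.time() - st.session_state["phase_start"]

if st.session_state["phase"] == "waiting_to_start":
    remaining = 10 - int(elapsed)  # same 10 s window as the diff
    if remaining > 0:
        st.markdown(f"⏳ {remaining} seconds to click 'Start Recording'...")
        if st.button("🎙️ Start Recording"):
            # Button press wins: advance the phase and restart the clock.
            st.session_state.update({"phase": "recording", "phase_start": time.time()})
            st.rerun()
        # No press yet: wait a second, then rerun so the countdown redraws.
        time.sleep(1)
        st.rerun()
    else:
        # Timeout wins: skip instead of blocking forever.
        st.session_state.update({"phase": "skipped", "phase_start": time.time()})
        st.rerun()
elif st.session_state["phase"] == "recording":
    st.success("Recording phase reached; st.audio_input would go here.")
elif st.session_state["phase"] == "skipped":
    st.warning("No action detected; the question would be skipped.")

Because every st.rerun() re-executes the whole script from the top, anything that must survive a redraw (the phase, the clock, the collected answers) has to live in st.session_state, which is why the diff resets those keys wholesale whenever it moves to the next question.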
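For the transcription step, the diff writes the st.audio_input recording to response_{idx}.wav and feeds it to the SpeechRecognition package's Google web recognizer, converting each failure mode into a placeholder string instead of letting the exception escape the rerun loop. Here is the same logic pulled out into a standalone helper, which may be easier to test in isolation; the function name and the __main__ example are hypothetical, not part of the commit.

# transcribe_sketch.py: the transcription step from the diff as a standalone
# helper (function name and __main__ usage are hypothetical).
# Requires: pip install SpeechRecognition
import speech_recognition as sr

def transcribe_wav(wav_path: str) -> str:
    """Transcribe a PCM WAV file with Google's free web recognizer, mapping
    each failure mode to a placeholder string, as app.py does."""
    recognizer = sr.Recognizer()
    try:
        # sr.AudioFile accepts WAV/AIFF/FLAC; st.audio_input produces WAV.
        with sr.AudioFile(wav_path) as source:
            audio = recognizer.record(source)  # read the entire file
        return recognizer.recognize_google(audio)
    except sr.UnknownValueError:
        return "[Could not understand audio]"  # speech was unintelligible
    except sr.RequestError:
        return "[Google API error]"            # network or quota failure
    except Exception as e:                     # keep the app alive on anything else
        return f"[Transcription failed: {e}]"

if __name__ == "__main__":
    print(transcribe_wav("response_0.wav"))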