Imarticuslearning committed on
Commit
61ee582
·
verified ·
1 Parent(s): b32fad9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -15
app.py CHANGED
@@ -841,23 +841,57 @@ if st.session_state["generated_questions"]:
841
  remaining = 15 - int(now - st.session_state.get("timer_start", 0))
842
  if remaining > 0:
843
  st.markdown(f"<h4 class='timer-text'>🎙️ {remaining} seconds to answer...</h4>", unsafe_allow_html=True)
844
- recognizer = sr.Recognizer()
845
- with sr.Microphone() as source:
846
- recognizer.adjust_for_ambient_noise(source)
847
- try:
848
- audio = recognizer.listen(source, timeout=1, phrase_time_limit=1)
 
 
 
 
 
849
  try:
850
- result = recognizer.recognize_google(audio)
851
- st.session_state.update({
852
- "recorded_text": result,
853
- "record_phase": "listening"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
854
  })
855
- except sr.UnknownValueError:
856
- pass
857
- except sr.WaitTimeoutError:
858
- pass
859
- time.sleep(1)
860
- st.rerun()
 
 
 
 
 
 
 
861
  else:
862
  st.markdown("<div style='padding:10px; background:#fff3e0; border-left:5px solid orange;'>⚠️ <strong>No response detected.</strong> Moving to next question...</div>", unsafe_allow_html=True)
863
  st.session_state["answers"].append({"question": question, "response": "[No response]"})
 
841
  remaining = 15 - int(now - st.session_state.get("timer_start", 0))
842
  if remaining > 0:
843
  st.markdown(f"<h4 class='timer-text'>🎙️ {remaining} seconds to answer...</h4>", unsafe_allow_html=True)
844
+ webrtc_ctx = webrtc_streamer(
845
+ key = f"webrtc_{idx}",
846
+ mode=WebRtcMode.SENDONLY,
847
+ audio_receiver_size=256,
848
+ media_stream_constraints={"audio": True, "video": False}
849
+ )
850
+
851
+ if webrtc_ctx.state.playing:
852
+ if st.button("⏹️ Stop Recording"):
853
+ wav_path = f"response_{idx}.wav"
854
  try:
855
+ frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
856
+ except Exception as e:
857
+ st.error(f"⚠️ Audio capture error: {e}")
858
+ frames = []
859
+
860
+ if frames:
861
+ try:
862
+ pcm = np.concatenate([f.to_ndarray() for f in frames], axis=0)
863
+ sample_rate = frames[0].sample_rate
864
+ sf.write(wav_path, pcm, sample_rate)
865
+ st.audio(wav_path)
866
+ st.session_state["answers"].append({
867
+ "question": question,
868
+ "response_file": wav_path
869
+ })
870
+ except Exception as e:
871
+ st.error(f"⚠️ Error saving recording: {e}")
872
+ st.session_state["answers"].append({
873
+ "question": question,
874
+ "response": "[Error saving recording]"
875
+ })
876
+ else:
877
+ st.warning("⚠️ No audio captured.")
878
+ st.session_state["answers"].append({
879
+ "question": question,
880
+ "response": "[No response]"
881
  })
882
+
883
+ st.session_state.update({
884
+ "record_phase": "idle",
885
+ "question_played": False,
886
+ "current_question_index": idx + 1
887
+ })
888
+
889
+ if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
890
+ evaluate_answers()
891
+ st.session_state["show_summary"] = True
892
+
893
+ st.experimental_rerun()
894
+
895
  else:
896
  st.markdown("<div style='padding:10px; background:#fff3e0; border-left:5px solid orange;'>⚠️ <strong>No response detected.</strong> Moving to next question...</div>", unsafe_allow_html=True)
897
  st.session_state["answers"].append({"question": question, "response": "[No response]"})