Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -400,7 +400,8 @@ def evaluate_answers():
|
|
| 400 |
For EACH question and its answer, provide a score from 0 to 5 points.
|
| 401 |
The candidate is at a {level_string} level.
|
| 402 |
Consider the following when assigning the per-question score:
|
| 403 |
-
- Effort
|
|
|
|
| 404 |
- Clarity of thought for the candidate's level.
|
| 405 |
- Basic logical structure.
|
| 406 |
- Use of examples, if any were given and appropriate.
|
|
@@ -409,11 +410,11 @@ def evaluate_answers():
|
|
| 409 |
level_specific_instructions_non_hr = """
|
| 410 |
You are an **extremely understanding, encouraging, and supportive** interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to **build confidence**.
|
| 411 |
**Scoring Guidelines for Beginners (0-5 points per question):**
|
| 412 |
-
- **5 points:**
|
| 413 |
-
- **4 points:** Good attempt, relevant, shows some understanding or key terms (e.g., one/two relevant words).
|
| 414 |
-
- **3 points:**
|
| 415 |
- **1-2 points:** Minimal effort, mostly irrelevant, but an attempt beyond silence.
|
| 416 |
-
- **0 points:**
|
| 417 |
Provide VERY positive feedback.
|
| 418 |
"""
|
| 419 |
elif level_string == "intermediate":
|
|
@@ -536,6 +537,7 @@ Guidelines:
|
|
| 536 |
- Use a mix of conceptual and real-world scenario questions.
|
| 537 |
- Include light critical thinking.
|
| 538 |
- Still no need for code, formulas, or complex diagrams.
|
|
|
|
| 539 |
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
|
| 540 |
**New Requirement:**
|
| 541 |
🚫 **Do NOT repeat any questions from previous generations again and again.** Ensure all generated questions are unique and different from past sessions.
|
|
@@ -555,6 +557,7 @@ Guidelines:
|
|
| 555 |
- Expect detailed, logical, well-structured answers.
|
| 556 |
- Include challenging “why” and “how” based questions.
|
| 557 |
- No need for code, but assume candidate has high expertise.
|
|
|
|
| 558 |
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
|
| 559 |
**New Requirement:**
|
| 560 |
🚫 **Do NOT repeat any questions from previous generations again and again.** Ensure all generated questions are unique and different from past sessions.
|
|
@@ -819,7 +822,6 @@ def get_ice_servers():
|
|
| 819 |
|
| 820 |
# === Main QA Interface ===
|
| 821 |
|
| 822 |
-
|
| 823 |
if st.session_state.get("generated_questions"):
|
| 824 |
idx = st.session_state.get("current_question_index", 0)
|
| 825 |
if idx < len(st.session_state["generated_questions"]):
|
|
@@ -851,6 +853,7 @@ if st.session_state.get("generated_questions"):
|
|
| 851 |
st.session_state["record_phase"] = "waiting_to_start"
|
| 852 |
st.session_state["question_start_time"] = time.time()
|
| 853 |
st.rerun()
|
|
|
|
| 854 |
# Phase 2: Waiting to Start Recording
|
| 855 |
elif st.session_state["record_phase"] == "waiting_to_start":
|
| 856 |
remaining = 10 - int(elapsed)
|
|
@@ -879,105 +882,63 @@ if st.session_state.get("generated_questions"):
|
|
| 879 |
evaluate_answers()
|
| 880 |
st.session_state["show_summary"] = True
|
| 881 |
st.rerun()
|
|
|
|
| 882 |
# Phase 3: Recording
|
| 883 |
elif st.session_state["record_phase"] == "recording":
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
st.markdown(f"<h4 class='timer-text'>🎙️ {remaining} seconds to answer...</h4>", unsafe_allow_html=True)
|
| 887 |
-
|
| 888 |
-
audio_value = st.audio_input("🎤 Tap to record — then stop when done", key=f"audio_{idx}")
|
| 889 |
-
|
| 890 |
-
# Defer processing by saving to temp_audio
|
| 891 |
-
if audio_value:
|
| 892 |
-
st.session_state["temp_audio"] = audio_value
|
| 893 |
-
st.rerun()
|
| 894 |
-
|
| 895 |
-
# Process the deferred audio input
|
| 896 |
-
if "temp_audio" in st.session_state and st.session_state.get("response_file") is None:
|
| 897 |
-
audio_value = st.session_state.pop("temp_audio")
|
| 898 |
-
wav_path = f"response_{idx}.wav"
|
| 899 |
-
with open(wav_path, "wb") as f:
|
| 900 |
-
f.write(audio_value.getbuffer())
|
| 901 |
-
|
| 902 |
-
recognizer = sr.Recognizer()
|
| 903 |
-
|
| 904 |
-
try:
|
| 905 |
-
with sr.AudioFile(wav_path) as source:
|
| 906 |
-
audio = recognizer.record(source)
|
| 907 |
-
transcript = recognizer.recognize_google(audio)
|
| 908 |
-
except sr.UnknownValueError:
|
| 909 |
-
transcript = "[Could not understand audio]"
|
| 910 |
-
except sr.RequestError:
|
| 911 |
-
transcript = "[Google API error]"
|
| 912 |
-
except Exception as e:
|
| 913 |
-
transcript = f"[Transcription failed: {e}]"
|
| 914 |
-
|
| 915 |
-
st.session_state["response_file"] = wav_path
|
| 916 |
-
st.session_state["record_phase"] = "listening"
|
| 917 |
-
st.success("✅ Audio uploaded. You may now confirm your answer.")
|
| 918 |
-
|
| 919 |
-
st.session_state["answers"].append({
|
| 920 |
-
"question": question,
|
| 921 |
-
"response_file": wav_path,
|
| 922 |
-
"response": transcript
|
| 923 |
-
})
|
| 924 |
|
| 925 |
-
|
| 926 |
-
|
| 927 |
-
|
| 928 |
-
|
| 929 |
-
st.rerun()
|
| 930 |
|
| 931 |
-
|
| 932 |
-
|
| 933 |
-
|
| 934 |
-
|
| 935 |
-
|
| 936 |
-
|
| 937 |
-
|
| 938 |
-
|
| 939 |
-
|
| 940 |
-
|
| 941 |
-
|
| 942 |
-
st.rerun()
|
| 943 |
|
| 944 |
-
else:
|
| 945 |
-
st.markdown("<div style='padding:10px; background:#fff3e0; border-left:5px solid orange;'>⚠️ <strong>No response detected.</strong> Moving to next question...</div>", unsafe_allow_html=True)
|
| 946 |
-
st.session_state["answers"].append({"question": question, "response": "[No response]"})
|
| 947 |
st.session_state.update({
|
| 948 |
-
"
|
| 949 |
-
"
|
| 950 |
-
"
|
| 951 |
-
"question_start_time": 0.0,
|
| 952 |
-
"current_question_index": idx + 1
|
| 953 |
})
|
| 954 |
-
|
| 955 |
-
|
| 956 |
-
|
|
|
|
|
|
|
|
|
|
| 957 |
st.rerun()
|
| 958 |
|
| 959 |
# Phase 4: Listening / Confirming
|
| 960 |
elif st.session_state["record_phase"] == "listening":
|
| 961 |
st.success("🎧 Review your recorded response below:")
|
| 962 |
st.audio(st.session_state["response_file"], format="audio/wav")
|
|
|
|
| 963 |
|
| 964 |
-
if st.button("
|
| 965 |
st.session_state.update({
|
| 966 |
"record_phase": "idle",
|
| 967 |
"recording_started": False,
|
| 968 |
"question_played": False,
|
| 969 |
"question_start_time": 0.0,
|
| 970 |
"current_question_index": idx + 1,
|
| 971 |
-
"response_file": None
|
| 972 |
-
"audio_waiting": True
|
| 973 |
})
|
| 974 |
-
|
| 975 |
if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
|
| 976 |
evaluate_answers()
|
| 977 |
st.session_state["show_summary"] = True
|
| 978 |
st.rerun()
|
| 979 |
-
|
| 980 |
-
|
| 981 |
# === Summary Display ===
|
| 982 |
if st.session_state.get("show_summary", False):
|
| 983 |
st.subheader("📋 Complete Mock Interview Summary")
|
|
|
|
| 400 |
For EACH question and its answer, provide a score from 0 to 5 points.
|
| 401 |
The candidate is at a {level_string} level.
|
| 402 |
Consider the following when assigning the per-question score:
|
| 403 |
+
- Effort: Did the candidate attempt a meaningful answer, even if partially incorrect?
|
| 404 |
+
- Relevance: Is the response at least partially related to the question topic?
|
| 405 |
- Clarity of thought for the candidate's level.
|
| 406 |
- Basic logical structure.
|
| 407 |
- Use of examples, if any were given and appropriate.
|
|
|
|
| 410 |
level_specific_instructions_non_hr = """
|
| 411 |
You are an **extremely understanding, encouraging, and supportive** interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to **build confidence**.
|
| 412 |
**Scoring Guidelines for Beginners (0-5 points per question):**
|
| 413 |
+
- **5 points:** Accurate, clear, and well-structured answer. Shows clear effort and basic understanding.
|
| 414 |
+
- **4 points:** Mostly correct with minor gaps or unclear phrasing. Good attempt, relevant, shows some understanding or key terms (e.g., one/two relevant words).
|
| 415 |
+
- **3 points:** Partially correct with evident effort, but lacks clarity or completeness.
|
| 416 |
- **1-2 points:** Minimal effort, mostly irrelevant, but an attempt beyond silence.
|
| 417 |
+
- **0 points:** Candidate explicitly says "I don't know", "I'm not sure", or provides placeholder/non-answers. No relevant effort or understanding shown. Incorrect or unrelated.
|
| 418 |
Provide VERY positive feedback.
|
| 419 |
"""
|
| 420 |
elif level_string == "intermediate":
|
|
|
|
| 537 |
- Use a mix of conceptual and real-world scenario questions.
|
| 538 |
- Include light critical thinking.
|
| 539 |
- Still no need for code, formulas, or complex diagrams.
|
| 540 |
+
- No coding or technical syntax required.
|
| 541 |
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
|
| 542 |
**New Requirement:**
|
| 543 |
🚫 **Do NOT repeat any questions from previous generations again and again.** Ensure all generated questions are unique and different from past sessions.
|
|
|
|
| 557 |
- Expect detailed, logical, well-structured answers.
|
| 558 |
- Include challenging “why” and “how” based questions.
|
| 559 |
- No need for code, but assume candidate has high expertise.
|
| 560 |
+
- No coding or technical syntax required.
|
| 561 |
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
|
| 562 |
**New Requirement:**
|
| 563 |
🚫 **Do NOT repeat any questions from previous generations again and again.** Ensure all generated questions are unique and different from past sessions.
|
|
|
|
| 822 |
|
| 823 |
# === Main QA Interface ===
|
| 824 |
|
|
|
|
| 825 |
if st.session_state.get("generated_questions"):
|
| 826 |
idx = st.session_state.get("current_question_index", 0)
|
| 827 |
if idx < len(st.session_state["generated_questions"]):
|
|
|
|
| 853 |
st.session_state["record_phase"] = "waiting_to_start"
|
| 854 |
st.session_state["question_start_time"] = time.time()
|
| 855 |
st.rerun()
|
| 856 |
+
|
| 857 |
# Phase 2: Waiting to Start Recording
|
| 858 |
elif st.session_state["record_phase"] == "waiting_to_start":
|
| 859 |
remaining = 10 - int(elapsed)
|
|
|
|
| 882 |
evaluate_answers()
|
| 883 |
st.session_state["show_summary"] = True
|
| 884 |
st.rerun()
|
| 885 |
+
|
| 886 |
# Phase 3: Recording
|
| 887 |
elif st.session_state["record_phase"] == "recording":
|
| 888 |
+
st.markdown(f"<h4 class='timer-text'>🎙️ Recording... Click below to stop when done</h4>", unsafe_allow_html=True)
|
| 889 |
+
audio_value = st.audio_input("🎤 Tap to record your answer — then stop when done", key=f"audio_{idx}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 890 |
|
| 891 |
+
if audio_value and st.button("⏹️ Stop Recording"):
|
| 892 |
+
wav_path = f"response_{idx}.wav"
|
| 893 |
+
with open(wav_path, "wb") as f:
|
| 894 |
+
f.write(audio_value.getbuffer())
|
|
|
|
| 895 |
|
| 896 |
+
recognizer = sr.Recognizer()
|
| 897 |
+
try:
|
| 898 |
+
with sr.AudioFile(wav_path) as source:
|
| 899 |
+
audio = recognizer.record(source)
|
| 900 |
+
transcript = recognizer.recognize_google(audio)
|
| 901 |
+
except sr.UnknownValueError:
|
| 902 |
+
transcript = "[Could not understand audio]"
|
| 903 |
+
except sr.RequestError:
|
| 904 |
+
transcript = "[Google API error]"
|
| 905 |
+
except Exception as e:
|
| 906 |
+
transcript = f"[Transcription failed: {e}]"
|
|
|
|
| 907 |
|
|
|
|
|
|
|
|
|
|
| 908 |
st.session_state.update({
|
| 909 |
+
"response_file": wav_path,
|
| 910 |
+
"record_phase": "listening",
|
| 911 |
+
"recorded_text": transcript
|
|
|
|
|
|
|
| 912 |
})
|
| 913 |
+
st.session_state["answers"].append({
|
| 914 |
+
"question": question,
|
| 915 |
+
"response_file": wav_path,
|
| 916 |
+
"response": transcript
|
| 917 |
+
})
|
| 918 |
+
st.success("✅ Audio recorded. You may now confirm your answer.")
|
| 919 |
st.rerun()
|
| 920 |
|
| 921 |
# Phase 4: Listening / Confirming
|
| 922 |
elif st.session_state["record_phase"] == "listening":
|
| 923 |
st.success("🎧 Review your recorded response below:")
|
| 924 |
st.audio(st.session_state["response_file"], format="audio/wav")
|
| 925 |
+
#st.markdown(f"**Your Response (text):** {st.session_state['recorded_text']}")
|
| 926 |
|
| 927 |
+
if st.button("✅ Confirm & Next"):
|
| 928 |
st.session_state.update({
|
| 929 |
"record_phase": "idle",
|
| 930 |
"recording_started": False,
|
| 931 |
"question_played": False,
|
| 932 |
"question_start_time": 0.0,
|
| 933 |
"current_question_index": idx + 1,
|
| 934 |
+
"response_file": None
|
|
|
|
| 935 |
})
|
|
|
|
| 936 |
if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
|
| 937 |
evaluate_answers()
|
| 938 |
st.session_state["show_summary"] = True
|
| 939 |
st.rerun()
|
| 940 |
+
|
| 941 |
+
|
| 942 |
# === Summary Display ===
|
| 943 |
if st.session_state.get("show_summary", False):
|
| 944 |
st.subheader("📋 Complete Mock Interview Summary")
|