Imarticuslearning committed
Commit 4fc2955 · verified · Parent: 5bb42d0

Create app.py

Files changed (1): app.py (+1114, -0)
app.py ADDED
@@ -0,0 +1,1114 @@
import os
import re
import time
from dotenv import load_dotenv
import streamlit as st
import PyPDF2
import google.generativeai as genai
import speech_recognition as sr
from random import sample
import random
from html import escape
import asyncio
import edge_tts
import pandas as pd
import tempfile
import traceback
from streamlit_webrtc import webrtc_streamer, WebRtcMode
from twilio.rest import Client
import logging
import whisper

logger = logging.getLogger(__name__)  # used by get_ice_servers() below

model = whisper.load_model("base")
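# Note: loading Whisper at module level means the model is reloaded on every
# Streamlit script rerun. A common pattern (a sketch, assuming a Streamlit
# version that provides st.cache_resource) would be:
#
#     @st.cache_resource
#     def load_whisper_model():
#         return whisper.load_model("base")
#
#     model = load_whisper_model()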


# ✅ MUST be the first Streamlit command
st.set_page_config(page_title="GrillMaster", layout="wide")

# Load API key
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Initialize session state
for key, default in {
    "generated_questions": [],
    "current_question_index": 0,
    "answers": [],
    "evaluation_feedback": "",
    "overall_score": 0,
    "percentage_score": 0,
    "is_recording": False,
    "question_played": False,
    "selected_domain": "",
    "response_captured": False,
    "timer_start": None,
    "show_summary": False,
    "recorded_text": "",
    "recording_complete": False,
    "recording_started": False,
    "audio_played": False,
    "question_start_time": 0.0,
    "record_phase": ""
}.items():
    if key not in st.session_state:
        st.session_state[key] = default

# Utility functions
def extract_pdf_text(uploaded_file):
    pdf_reader = PyPDF2.PdfReader(uploaded_file)
    return "".join(page.extract_text() or "" for page in pdf_reader.pages).strip()

def get_questions(prompt, input_text, num_questions=3, max_retries=10):
    model = genai.GenerativeModel('gemini-1.5-pro-latest')

    if "previous_questions" not in st.session_state:
        st.session_state["previous_questions"] = set()

    new_questions = []
    retries = 0

    while len(new_questions) < num_questions and retries < max_retries:
        # Add artificial noise/randomness to the input so repeated calls yield fresh questions
        noise = f" [session: {random.randint(1000, 9999)} time: {time.time()}]"
        modified_input = input_text + noise

        response = model.generate_content([prompt, modified_input])
        questions = [
            q.strip("*•- ")
            for q in response.text.strip().split("\n")
            if q.strip() and "question" not in q.lower()
        ]

        for q in questions:
            if q not in st.session_state["previous_questions"]:
                st.session_state["previous_questions"].add(q)
                new_questions.append(q)
                if len(new_questions) == num_questions:
                    break

        retries += 1

    return new_questions

async def generate_question_audio(question, voice="en-IE-EmilyNeural"):
    clean_question = re.sub(r'[^A-Za-z0-9.,?! ]+', '', question)
    tts = edge_tts.Communicate(text=clean_question, voice=voice)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        await tts.save(tmp_file.name)
    return tmp_file.name
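# Usage (as done in the main loop below): this coroutine is driven with
# asyncio.run(), e.g. asyncio.run(generate_question_audio("Tell me about yourself.")),
# and returns the path of a temporary MP3 file suitable for st.audio().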

########################################///////////////////////////////////////////////////#########################################

# HR_PARAMETERS_CONFIG - Updated based on the latest Excel sheet (input_file_0.png).
# These are the parameters that can be judged from audio/text responses.
HR_PARAMETERS_CONFIG = {
    "Voice Modulation": {  # Non-Verbal Cues
        "weight_original": 5,
        "rubric": "1-5 (5=Good pace/tone, conversational; 3=Sounds Scripted/Slight Monotony; 1=Flat tone/Robotic)"
    },
    "Confidence": {  # Personality
        "weight_original": 7,
        "rubric": "1-5 (5=Bold & Confident throughout; 3=Confused/Nervous in parts; 1=Extremely nervous/Timid)"
    },
    "Attitude": {  # Personality
        "weight_original": 3,
        "rubric": "1-5 (5=Assertive, Positive, Open; 3=Neutral/Mildly defensive; 1=Aggressive/Pessimistic/Dismissive)"
    },
    "Flow & Fluency": {  # Articulation
        "weight_original": 20,
        "rubric": "1-5 (5=Excellent Fluency, Spontaneous; 3=Initially struggles, then manages/Takes some time; 1=Many fillers/Pauses/Dead silence)"
    },
    "Structured thoughts & Clarity": {  # Articulation
        "weight_original": 10,
        "rubric": "1-5 (5=Organized, Crisp, Coherent thoughts, e.g. STAR method; 3=Ideas are okay but clarity/structure could be better; 1=Incoherent/Rambling/Struggles to put thoughts into words)"
    },
    "Sentence Formation": {  # Language Skills
        "weight_original": 20,
        "rubric": "1-5 (5=Good Clarity, Variety in sentence structure, Good Vocab; 3=Decent communication, might find some words difficult; 1=Talks in fragments/one-liners, Hard to understand)"
    },
    "Basics of Grammar + SVA": {  # Language Skills (SVA = Subject-Verb Agreement)
        "weight_original": 10,
        "rubric": "1-5 (5=Good Command over Language, Minimal errors; 3=Average communicator, some errors but understandable; 1=Makes a lot of Grammatical Errors impacting clarity)"
    },
    "Persuasiveness": {  # Rapport Building
        "weight_original": 3,
        "rubric": "1-5 (5=Impactful, Convincing Answers, Connects with interviewer; 3=Average or Common Answers; 1=Lacks Presence of Mind/No connection)"
    },
    "Quality of Answers": {  # Rapport Building
        "weight_original": 7,
        "rubric": "1-5 (5=Handles questions well, Relevant & Thoughtful Answers, Asks good questions; 3=Very Generic Answers; 1=Vague/Lacks Depth/Shallow/Irrelevant)"
    }
}

# Calculate the total original weight for normalization
TOTAL_ORIGINAL_WEIGHT_HR = sum(param_data["weight_original"] for param_data in HR_PARAMETERS_CONFIG.values())  # Should be 85

# Add normalized weights to the config for calculating a score out of 100
for param in HR_PARAMETERS_CONFIG:
    HR_PARAMETERS_CONFIG[param]["weight_normalized"] = (HR_PARAMETERS_CONFIG[param]["weight_original"] / TOTAL_ORIGINAL_WEIGHT_HR) * 100
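# Worked example: the original weights sum to 5+7+3+20+10+20+10+3+7 = 85, so
# "Flow & Fluency" (weight 20) normalizes to 20/85*100 ≈ 23.5 points of the
# final 100-point score, while "Attitude" (weight 3) contributes only ≈ 3.5.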

########################################///////////////////////////////////////////////////#########################################
# Summary-of-improvement function

def generate_improvement_suggestions():
    model = genai.GenerativeModel('gemini-1.5-pro-latest')
    difficulty_level = st.session_state.get("difficulty_level_select", "Beginner")
    level_string = difficulty_level.lower()

    if not st.session_state.get("answers"):
        st.session_state.improvement_suggestions = "No answers were recorded to generate improvement suggestions."
        return

    # Prepare the context for the LLM
    qa_context = []
    for i, entry in enumerate(st.session_state["answers"]):
        qa_context.append(
            f"Question {i+1}: {entry['question']}\n"
            f"Candidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
        )
    full_qa_context = "\n\n".join(qa_context)

    initial_evaluation_feedback = st.session_state.get("evaluation_feedback", "Initial evaluation not available.")

    # Remove any previous "Total Calculated Score..." line from the initial feedback
    # so the LLM is not confused by seeing it as part of the context.
    initial_evaluation_lines = initial_evaluation_feedback.splitlines()
    cleaned_initial_evaluation = "\n".join(
        line for line in initial_evaluation_lines if not line.strip().startswith("**Total Calculated Score:**")
    )

    improvement_prompt_template = """
You are an expert interview coach. You have the following information about a candidate's mock interview:
- Candidate's Level: {level_string}
- Questions Asked and Candidate's Answers:
{full_qa_context}
- Initial Evaluation Feedback Provided to Candidate:
---
{cleaned_initial_evaluation}
---

Based on all this information, your task is to provide DETAILED and CONSTRUCTIVE suggestions for each question to help the candidate improve. Be supportive and encouraging.

For EACH question, please provide:
1. **How to Improve This Answer:** Specific, actionable advice on what the candidate could have added, clarified, or approached differently to make their answer better for their {level_string} level. Focus on 1-2 key improvement points.
2. **Hints for an Ideal Answer:** Briefly mention 2-3 key concepts, terms, or elements that a strong answer (appropriate for their {level_string} level) would typically include. DO NOT provide a full model answer, just hints and pointers.

Keep the tone positive and focused on learning.

Structure your response clearly for each question. Example for one question:

---
**Regarding Question X: "[Original Question Text Here]"**

*How to Improve This Answer:*
[Your specific suggestion 1 for improvement...]
[Your specific suggestion 2 for improvement...]

*Hints for an Ideal Answer (Key Points to Consider):*
- Hint 1 or Key concept 1
- Hint 2 or Key concept 2
- Hint 3 or Key element 3 (optional)
---
(Repeat this structure for all questions)
"""

    formatted_improvement_prompt = improvement_prompt_template.format(
        level_string=level_string,
        full_qa_context=full_qa_context,
        cleaned_initial_evaluation=cleaned_initial_evaluation
    )

    try:
        st.info("🤖 Generating detailed improvement suggestions... Please wait.")
        response = model.generate_content(formatted_improvement_prompt)
        st.session_state.improvement_suggestions = response.text.strip()
        st.session_state.improvement_suggestions_generated = True
        st.success("Detailed suggestions generated!")
    except Exception as e:
        st.error(f"Error generating improvement suggestions: {e}")
        st.session_state.improvement_suggestions = f"Could not generate suggestions due to an error: {e}"
        st.session_state.improvement_suggestions_generated = False

########################################///////////////////////////////////////////////////#########################################

# Evaluate candidate answers

def evaluate_answers():
    model = genai.GenerativeModel('gemini-1.5-pro-latest')
    # "difficulty_level_select" is the key of the difficulty selectbox in the sidebar
    difficulty_level = st.session_state.get("difficulty_level_select", "Beginner")
    level_string = difficulty_level.lower()
    num_answered_questions = len(st.session_state.get("answers", []))

    # Reset the improvement-suggestions flag when re-evaluating
    st.session_state.improvement_suggestions_generated = False
    st.session_state.improvement_suggestions = ""

    meaningful_answers_exist = False
    if st.session_state.get("answers"):
        for entry in st.session_state["answers"]:
            response_text = str(entry.get('response', '')).strip().lower()
            no_response_placeholders = [
                "", "[no response provided]", "[no response - timed out]",
                "[no response]", "no response", "[could not understand audio]",
                "[no clear response recorded]", "[no action - timed out before recording]",
                "[no speech detected in recording time]", "[no speech recorded - time up]",
                "[recording stopped manually, possibly empty]",
                "[no action - did not start recording]",
                "[no speech detected in recording phase]"
            ]
            if response_text not in no_response_placeholders:
                meaningful_answers_exist = True
                break

    if not meaningful_answers_exist:
        if st.session_state.selected_domain == "Soft Skills":
            hr_params_na = "\n".join([f"- {param}: 0/5" for param in HR_PARAMETERS_CONFIG.keys()])
            no_answer_feedback = (
                "No meaningful answers were provided for evaluation.\n\n"
                f"**Parameter Scores (1-5):**\n{hr_params_na}\n\n"
                "**Overall Qualitative Feedback:**\nCandidate did not provide responses to evaluate soft skills."
            )
            st.session_state["hr_parameter_scores_dict"] = {param: 0.0 for param in HR_PARAMETERS_CONFIG.keys()}  # Store zeroed scores
        else:  # Non-HR domains
            no_answer_feedback = (
                "No meaningful answers were provided.\n"
                "**Total Calculated Score:** 0.0 / 0.0 (0.0%)\n\n"  # Placeholder for non-HR if no answers
                "**Overall Evaluation Summary:** N/A"
            )
        st.session_state["evaluation_feedback"] = no_answer_feedback
        st.session_state["overall_score"] = 0.0
        st.session_state["percentage_score"] = 0.0
        return

    # --- BRANCHING FOR HR (SOFT SKILLS) VS OTHER DOMAINS ---
    if st.session_state.selected_domain == "Soft Skills":
        hr_prompt_parameter_list = ""
        for param, config in HR_PARAMETERS_CONFIG.items():
            hr_prompt_parameter_list += f"- **{param}:** {config['rubric']}\n"

        hr_prompt_template = f"""
You are an experienced HR interview evaluator assessing a candidate's soft skills based on their answers to interview questions.
The candidate's performance across ALL answers should inform your scores for the following parameters.

**Parameters to Score (Assign a score from 1 to 5 for each):**
{hr_prompt_parameter_list}

After providing a score (1-5) for each of the above parameters, also write an **Overall Qualitative Feedback** section.
This section should summarize the candidate's general soft skill strengths and areas for improvement, based on their communication, engagement, and professionalism throughout the interview.

**REQUIRED OUTPUT FORMAT (Strictly Adhere):**

**Parameter Scores (1-5):**
Voice Modulation: [score]
Confidence: [score]
Attitude: [score]
Flow & Fluency: [score]
Structured thoughts & Clarity: [score]
Sentence Formation: [score]
Basics of Grammar + SVA: [score]
Persuasiveness: [score]
Quality of Answers: [score]

**Overall Qualitative Feedback:**
[Your holistic qualitative feedback here. Be encouraging and constructive.]
"""
        candidate_responses_formatted_hr = "\n\n".join(
            [f"Question {i+1}: {entry['question']}\nCandidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
             for i, entry in enumerate(st.session_state["answers"])]
        )
        full_prompt_for_hr_evaluation = f"{hr_prompt_template}\n\nCandidate's Interview Answers (Consider all of these for holistic parameter scoring):\n{candidate_responses_formatted_hr}"

        try:
            response_content = model.generate_content(full_prompt_for_hr_evaluation)
            full_llm_response_text = response_content.text.strip()

            print("--- LLM Output for HR Score Extraction ---")
            print(full_llm_response_text)
            print("-----------------------------------------")

            hr_parameter_scores_parsed_dict = {}  # Parsed score for each HR parameter
            total_weighted_score_percentage = 0.0

            for param_name_config, config_data in HR_PARAMETERS_CONFIG.items():
                # Use a specific regex, anchored to the start of a line (after an optional list marker).
                # re.escape ensures special characters in the parameter name are treated literally.
                param_score_pattern = re.compile(
                    r"^\s*(?:[\*\-]\s*)?" + re.escape(param_name_config.split('(')[0].strip()) + r"\s*[:\-–—]?\s*(\d+(?:\.\d+)?)\b",
                    re.IGNORECASE | re.MULTILINE
                )  # \b marks a word boundary after the score

                match = param_score_pattern.search(full_llm_response_text)
                param_score = 1.0  # Default to 1 (the lowest valid score) if not found or unparseable
                if match:
                    score_text = match.group(1)
                    try:
                        param_score = float(score_text)
                        param_score = max(1.0, min(5.0, param_score))  # Clamp strictly to 1-5 for HR
                        print(f"HR Param '{param_name_config}' - Matched text: '{score_text}', Parsed: {param_score}")
                    except ValueError:
                        print(f"HR Param '{param_name_config}' - ValueError parsing score from '{score_text}' in match '{match.group(0)}'. Defaulting to 1.0.")
                        param_score = 1.0
                else:
                    print(f"HR Param '{param_name_config}' - Score pattern not found. Defaulting to 1.0 for this param.")

                hr_parameter_scores_parsed_dict[param_name_config] = param_score
                total_weighted_score_percentage += (param_score / 5.0) * config_data["weight_normalized"]  # Use the normalized weight

            st.session_state["hr_parameter_scores_dict"] = hr_parameter_scores_parsed_dict  # Stored for table display
            st.session_state["overall_score"] = round(total_weighted_score_percentage, 1)
            st.session_state["percentage_score"] = round(total_weighted_score_percentage, 1)

            # Construct the feedback to display: parsed scores + the qualitative section from the LLM
            parsed_scores_display_text = "**Parsed Parameter Scores (1-5 based on AI Evaluation):**\n"
            for p_name, p_score in hr_parameter_scores_parsed_dict.items():
                parsed_scores_display_text += f"- {p_name}: {p_score:.1f}/5\n"

            qualitative_feedback_hr_extract = "Overall qualitative feedback section not clearly identified in AI response."
            qualitative_match_hr = re.search(r"\*\*Overall Qualitative Feedback:\*\*(.*)", full_llm_response_text, re.DOTALL | re.IGNORECASE)
            if qualitative_match_hr:
                qualitative_feedback_hr_extract = qualitative_match_hr.group(1).strip()

            st.session_state["evaluation_feedback"] = f"{parsed_scores_display_text}\n\n**Overall Qualitative Feedback from AI:**\n{qualitative_feedback_hr_extract}"

        except Exception as e_hr_eval:
            st.error(f"Error during HR/Soft Skills evaluation processing: {e_hr_eval}")
            print(f"HR EVALUATION PROCESSING TRACEBACK:\n{traceback.format_exc()}")
            st.session_state["evaluation_feedback"] = f"Could not process HR skills evaluation: {e_hr_eval}"
            st.session_state["overall_score"] = 0.0
            st.session_state["percentage_score"] = 0.0
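# Scoring recap for the HR branch above: each 1-5 parameter score is scaled to
# its normalized weight, i.e. total = sum((score_i / 5) * weight_normalized_i).
# Since the normalized weights sum to 100, a candidate scoring 4/5 on every
# parameter lands at exactly 80.0.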

    else:  # --- NON-HR (Analytics, Finance) evaluation logic ---
        base_assessment_criteria_qualitative_non_hr = """
For the OVERALL qualitative summary, assess responses based on:
- Conceptual Understanding (effort and relevance more than perfect accuracy for the level)
- Communication Clarity (can the core idea be understood?)
- Depth of Explanation (relative to expected level)
- Use of Examples (if any, and if appropriate for the level)
- Logical Flow (is there a basic structure or train of thought?)
"""
        per_question_scoring_guidelines_non_hr = f"""
For EACH question and its answer, provide a score from 0 to 5 points.
The candidate is at a {level_string} level.
Consider the following when assigning the per-question score:
- Effort and relevance of the answer.
- Clarity of thought for the candidate's level.
- Basic logical structure.
- Use of examples, if any were given and appropriate.
"""
        if level_string == "beginner":
            level_specific_instructions_non_hr = """
You are an **extremely understanding, encouraging, and supportive** interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to **build confidence**.
**Scoring Guidelines for Beginners (0-5 points per question):**
- **5 points:** Generally correct and relevant, even if brief. Shows clear effort and basic understanding.
- **4 points:** Good attempt, relevant, shows some understanding or key terms (e.g., one/two relevant words).
- **3 points:** Tries, somewhat related, or acknowledges the question with a vague thought.
- **1-2 points:** Minimal effort, mostly irrelevant, but an attempt beyond silence.
- **0 points:** Completely irrelevant, no attempt, or placeholder.
Provide VERY positive feedback.
"""
        elif level_string == "intermediate":
            level_specific_instructions_non_hr = """Supportive evaluator for **INTERMEDIATE**. Scoring (0-5): 5=Correct/Clear; 3-4=Mostly correct; 1-2=Partial/Gaps; 0=Incorrect."""
        else:  # Advanced
            level_specific_instructions_non_hr = """Discerning evaluator for **ADVANCED**. Scoring (0-5): 5=Accurate/Comprehensive; 3-4=Correct but lacks nuance; 1-2=Inaccurate; 0=Fundamentally incorrect."""

        evaluation_prompt_template_non_hr = f"""
{level_specific_instructions_non_hr}
{per_question_scoring_guidelines_non_hr}
{base_assessment_criteria_qualitative_non_hr}
**YOUR RESPONSE MUST STRICTLY FOLLOW THIS FORMAT. PROVIDE SCORES FOR EACH QUESTION.**
Output format:

**Per-Question Scores:**
Question 1 Score: [Score for Q1 out of 5]
... (repeat for all {num_answered_questions} questions provided)

**Overall Evaluation Summary:**
- Concept Understanding: [Overall qualitative feedback here]
- Communication: [Overall qualitative feedback here]
- Depth of Explanation: [Overall qualitative feedback here]
- Examples: [Overall qualitative feedback here]
- Logical Flow: [Overall qualitative feedback here]
[Any additional overall encouraging remarks can optionally follow here]
"""
        candidate_responses_formatted_non_hr = "\n\n".join(
            [f"Question {i+1}: {entry['question']}\nAnswer {i+1}: {str(entry.get('response', '[No response provided]'))}"
             for i, entry in enumerate(st.session_state["answers"])]
        )
        full_prompt_for_non_hr_evaluation = f"{evaluation_prompt_template_non_hr}\n\nCandidate Responses:\n{candidate_responses_formatted_non_hr}"

        try:
            response_content_non_hr = model.generate_content(full_prompt_for_non_hr_evaluation)
            full_llm_response_text_non_hr = response_content_non_hr.text.strip()
            raw_llm_feedback_non_hr = full_llm_response_text_non_hr

            print("--- LLM Output for Non-HR Score Extraction ---")
            print(full_llm_response_text_non_hr)
            print("---")

            total_score_non_hr = 0.0
            parsed_scores_count_non_hr = 0
            per_question_scores_list_non_hr = []
            score_line_pattern_non_hr = re.compile(r"Question\s*(\d+)\s*Score:\s*(\d+(?:\.\d+)?)(?:\s*/\s*5)?", re.IGNORECASE)
            text_to_search_non_hr = full_llm_response_text_non_hr
            scores_block_match_non_hr = re.search(r"(?i)\*\*Per-Question Scores:\*\*(.*?)(?=\*\*Overall Evaluation Summary:\*\*|\Z)", text_to_search_non_hr, re.DOTALL)

            if scores_block_match_non_hr:
                text_to_search_non_hr = scores_block_match_non_hr.group(1).strip()
                print(f"Non-HR: Found 'Per-Question Scores' block:\n{text_to_search_non_hr}")
            else:
                print("Non-HR: No dedicated 'Per-Question Scores' block found; searching the entire response.")

            for match_non_hr in score_line_pattern_non_hr.finditer(text_to_search_non_hr):
                q_num_text_non_hr, score_val_text_non_hr = match_non_hr.group(1), match_non_hr.group(2)
                try:
                    score_non_hr = float(score_val_text_non_hr)
                    score_non_hr = max(0.0, min(5.0, score_non_hr))
                    total_score_non_hr += score_non_hr
                    parsed_scores_count_non_hr += 1
                    per_question_scores_list_non_hr.append(f"Question {q_num_text_non_hr}: {score_non_hr:.1f}/5")
                    print(f"Non-HR Matched Q{q_num_text_non_hr} Score: {score_non_hr}")
                except ValueError:
                    print(f"Non-HR Warning: Could not parse score '{score_val_text_non_hr}' from: '{match_non_hr.group(0)}'")

            if parsed_scores_count_non_hr != num_answered_questions and meaningful_answers_exist:
                st.warning(f"Non-HR Score Count Mismatch: Parsed {parsed_scores_count_non_hr} scores, expected {num_answered_questions}.")
                print(f"Non-HR Score Count Mismatch: Expected {num_answered_questions}, got {parsed_scores_count_non_hr}")

            if parsed_scores_count_non_hr == 0 and meaningful_answers_exist:
                st.warning("CRITICAL (Non-HR): No per-question scores parsed from LLM response. Total score set to 0.")
                print("CRITICAL (Non-HR): No per-question scores parsed.")
                total_score_non_hr = 0.0

            max_score_non_hr = num_answered_questions * 5.0
            st.session_state["overall_score"] = total_score_non_hr
            st.session_state["percentage_score"] = (total_score_non_hr / max_score_non_hr) * 100.0 if max_score_non_hr > 0 else 0.0

            final_feedback_non_hr = f"**Total Calculated Score:** {st.session_state['overall_score']:.1f} / {max_score_non_hr:.1f} ({st.session_state['percentage_score']:.1f}%)\n\n"
            if per_question_scores_list_non_hr:
                final_feedback_non_hr += "**Parsed Per-Question Scores:**\n" + "\n".join(per_question_scores_list_non_hr) + "\n\n"

            qual_summary_match_non_hr = re.search(r"\*\*Overall Evaluation Summary:\*\*(.*)", raw_llm_feedback_non_hr, re.DOTALL | re.IGNORECASE)
            if qual_summary_match_non_hr:
                final_feedback_non_hr += "**Overall Qualitative Summary (from AI):**\n" + qual_summary_match_non_hr.group(1).strip()
            else:
                final_feedback_non_hr += "\n---\n**Full AI Response (for context if summary parsing failed):**\n" + raw_llm_feedback_non_hr
            st.session_state["evaluation_feedback"] = final_feedback_non_hr.strip()

        except Exception as e_non_hr_eval:
            st.error(f"Error during Non-HR evaluation processing: {e_non_hr_eval}")
            print(f"NON-HR EVALUATION PROCESSING TRACEBACK:\n{traceback.format_exc()}")
            st.session_state["evaluation_feedback"] = f"Could not process Non-HR evaluation: {e_non_hr_eval}"
            st.session_state["overall_score"] = 0.0
            st.session_state["percentage_score"] = 0.0
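# Examples of lines score_line_pattern_non_hr accepts (the "/5" suffix is optional):
#     "Question 1 Score: 4 / 5"  -> group(1) == "1", group(2) == "4"
#     "Question 2 Score: 3.5"    -> group(1) == "2", group(2) == "3.5"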

########################################///////////////////////////////////////////////////#########################################
# --- Prompts for Question Generation ---
BEGINNER_PROMPT = """
You are a friendly mock interview trainer conducting a **Beginner-level** spoken interview in the domain of **{domain}**.
Ask basic verbal interview questions based on the candidate's input: **{input_text}**.

Guidelines:
- Ask simple conceptual questions.
- Avoid jargon and complex examples.
- Use easy language.
- No coding or technical syntax required.
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
**New Requirement:**
🚫 **Do NOT repeat questions from previous generations.** Ensure all generated questions are unique and different from past sessions.

**Guidelines:**
✅ Questions should focus on key concepts, best practices, and problem-solving within {selected_domain}.
✅ Ensure questions are direct, structured, and relevant to real-world applications.
❌ Do NOT include greetings like 'Let's begin' or 'Welcome to the interview'.
❌ Avoid vague or open-ended statements; each question should be concise and specific.
"""

INTERMEDIATE_PROMPT = """
You are a professional mock interviewer conducting an **Intermediate-level** spoken interview in the domain of **{domain}**.
Ask moderately challenging verbal interview questions based on the candidate's input: **{input_text}**.

Guidelines:
- Use a mix of conceptual and real-world scenario questions.
- Include light critical thinking.
- Still no need for code, formulas, or complex diagrams.
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
**New Requirement:**
🚫 **Do NOT repeat questions from previous generations.** Ensure all generated questions are unique and different from past sessions.

**Guidelines:**
✅ Questions should focus on key concepts, best practices, and problem-solving within {selected_domain}.
✅ Ensure questions are direct, structured, and relevant to real-world applications.
❌ Do NOT include greetings like 'Let's begin' or 'Welcome to the interview'.
❌ Avoid vague or open-ended statements; each question should be concise and specific.
"""

ADVANCED_PROMPT = """
You are a strict mock interviewer conducting an **Advanced-level** spoken interview in the domain of **{domain}**.
Ask deep, analytical, real-world scenario-based questions from the candidate's input: **{input_text}**.

Guidelines:
- Expect detailed, logical, well-structured answers.
- Include challenging "why" and "how" based questions.
- No need for code, but assume the candidate has high expertise.
Ensure the questions are clear, to the point, and suitable for a {difficulty_level}-level interview in {selected_domain}.
**New Requirement:**
🚫 **Do NOT repeat questions from previous generations.** Ensure all generated questions are unique and different from past sessions.

**Guidelines:**
✅ Questions should focus on key concepts, best practices, and problem-solving within {selected_domain}.
✅ Ensure questions are direct, structured, and relevant to real-world applications.
❌ Do NOT include greetings like 'Let's begin' or 'Welcome to the interview'.
❌ Avoid vague or open-ended statements; each question should be concise and specific.
"""

########################################///////////////////////////////////////////////////#########################################
# UI styles
st.markdown("""
<style>
/* Base style for all stButton elements */
.stButton > button {
    background-color: #007BFF !important;
    color: white !important;
    border-radius: 10px !important;
    font-weight: bold !important;
    width: 100% !important;
    padding: 0.4rem 0.75rem !important;
    font-size: 0.95rem !important;
    line-height: 1.5 !important;
    border: 1px solid transparent !important;
    transition: background-color 0.2s ease-in-out, border-color 0.2s ease-in-out, box-shadow 0.2s ease-in-out !important;
    margin-bottom: 8px !important;
    box-sizing: border-box;
}
.stButton > button:hover {
    background-color: #0056b3 !important;
    color: white !important;
    border-color: #0056b3 !important;
}
.stButton > button:focus,
.stButton > button:active {
    background-color: #0056b3 !important;
    border-color: #004085 !important;
    box-shadow: 0 0 0 0.2rem rgba(0,123,255,.5) !important;
    outline: none !important;
}

.timer-text {
    font-size: 1.3rem;
    font-weight: 600;
    color: #00bcd4;
    animation: pulse 1s infinite;
}
@keyframes pulse {
    0% {opacity: 1;}
    50% {opacity: 0.4;}
    100% {opacity: 1;}
}

.summary-card {
    background-color: #f9f9f9;
    padding: 20px;
    border-radius: 12px;
    border: 1px solid #ddd;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05);
}
/* More specific selector for the pre text color */
div.summary-card > pre {
    white-space: pre-wrap !important;
    word-wrap: break-word !important;
    font-family: inherit !important;
    font-size: 0.95rem !important;
    color: #000000 !important; /* pure black for readability */
    background-color: #ffffff !important; /* ensure the background is white */
    padding: 15px !important;
    border-radius: 8px !important;
    border: 1px solid #e0e0e0 !important;
    max-height: 400px !important;
    overflow-y: auto !important;
}
</style>
""", unsafe_allow_html=True)

# Header
st.markdown("""
<div style='text-align: center; margin-top: -30px; padding-top: 10px;'>
    <h1 style='font-size: 2.8rem; font-weight: 800; color: #003366;'>🎯 Welcome to <span style='color: #007BFF;'>GrillMaster</span></h1>
    <p style='font-size: 1.1rem; color: #555;'>Your AI-powered mock interview assistant</p>
</div>
<hr style='border: 1px solid #e0e0e0; margin: 20px auto;'>
""", unsafe_allow_html=True)

if not st.session_state["generated_questions"]:
    st.markdown("""
    <div style='text-align: center; margin-top: -10px; margin-bottom: 30px;'>
        <h3 style='font-weight: 700; color: #333;'>🚀 Let's get started!</h3>
        <p style='font-size: 1rem; color: #666;'>Select your interview domain and input type to begin your practice session.</p>
    </div>
    <hr style='border: 1px solid #e0e0e0; margin-top: 0px;'>
    """, unsafe_allow_html=True)

# Example soft skills questions for the HR/Soft Skills domain
if st.session_state["selected_domain"] == "Soft Skills":
    hr_questions = [
        "Tell me about yourself.",
        "Why should we hire you?",
        "What are your strengths and weaknesses?",
        "What is the difference between hard work and smart work?",
        "Why do you want to work at our company?",
        "How do you feel about working nights and weekends?",
        "Can you work under pressure?",
        "What are your goals?",
        "Are you willing to relocate or travel?",
        "What motivates you to do a good job?",
        "What would you want to accomplish within your first 30 days of employment?",
        "Do you prefer working alone or in a collaborative environment?",
        "Give me an example of your creativity.",
        "How long would you expect to work for us if hired?",
        "Aren't you overqualified for this position?",
        "Describe your ideal company, location, and job.",
        "Explain how you would be an asset to this organization.",
        "What are your interests?",
        "Would you lie for the company?",
        "Who has inspired you in your life, and why?",
        "What was the toughest decision you ever had to make?",
        "Have you considered starting your own business?",
        "How do you define success, and how do you measure up to your own definition?",
        "Tell me something about our company.",
        "How much salary do you expect?",
        "Where do you see yourself five years from now?",
        "Do you have any questions for me?",
        "Are you a manager or a leader?",
        "Imagine that you are not lucky enough to get this job; how would you take it?"
    ]

# === Sidebar: Domain and Input Configuration ===
st.sidebar.subheader("Select Interview Domain:")
for domain in ["Analytics", "Finance", "Soft Skills"]:
    if st.sidebar.button(domain):
        st.session_state.clear()  # 🔁 Reset the entire session state
        st.session_state["selected_domain"] = domain
        st.rerun()

if not st.session_state["selected_domain"]:
    st.sidebar.info("Please select a domain to continue.")
    st.stop()

st.sidebar.markdown(f"**Selected Domain:** {st.session_state['selected_domain']}")
num_qs = st.sidebar.slider("Number of Questions:", 1, 10, 3)

if st.session_state["selected_domain"] == "Soft Skills":
    if st.sidebar.button("Generate Questions"):
        st.session_state["generated_questions"] = sample(hr_questions, num_qs)
        st.session_state["current_question_index"] = 0
        st.rerun()
else:
    section_choice = st.sidebar.radio("Choose Input Type:", ("Resume", "Job Description", "Skills"))
    difficulty = st.sidebar.selectbox("Select Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
    input_text = ""

    if section_choice == "Resume":
        uploaded_file = st.sidebar.file_uploader("Upload Resume:", type=["pdf", "txt"])
        if uploaded_file:
            input_text = extract_pdf_text(uploaded_file)

    elif section_choice == "Job Description":
        input_text = st.sidebar.text_area("Paste Job Description:")

    elif section_choice == "Skills":
        input_text = ""

        if st.session_state["selected_domain"] == "Finance":
            finance_levels = ["Level-1", "Level-2", "Level-3"]
            selected_level = st.sidebar.selectbox("Select a Finance Level:", finance_levels, key="finance_level_select")

            difficulty = st.session_state.get("difficulty", "Beginner")

            if selected_level != "Level-1":
                st.sidebar.warning(f"🚧 {selected_level} content is still under development. Please select Level-1 to continue.")
                st.stop()

            # Map the difficulty level to a column in the Excel sheet
            column_map = {
                "Beginner": "MODULE 1-EASY",
                "Intermediate": "MODULE 1-MEDIUM",
                "Advanced": "MODULE 1-DIFFICULT"
            }
            selected_column = column_map[difficulty]

            # Load the Excel sheet and its questions
            excel_path = os.path.join("data", "CIBOP Mock Questions.xlsx")
            try:
                df = pd.read_excel(excel_path, engine="openpyxl")
                questions_from_excel = df[selected_column].dropna().astype(str).tolist()
                input_text = selected_column  # Optional, for tracking
            except Exception as e:
                st.sidebar.error(f"❌ Error reading Excel file: {e}")
                st.stop()

            st.sidebar.success(f"✅ Loaded {difficulty}-level questions from {selected_level}")

        else:
            # For Analytics or any other domain
            skills = {
                "Analytics": ["Python", "SQL", "Machine Learning", "Statistics", "Business Analytics"]
            }
            skill_list = skills.get(st.session_state["selected_domain"], [])
            if skill_list:
                selected_skill = st.sidebar.selectbox("Select a Skill:", skill_list, key="skill_select")
                input_text = selected_skill
                st.sidebar.markdown(f"✅ Selected Skill: **{selected_skill}**")

    if st.sidebar.button("Generate Questions"):
        if not input_text.strip():
            st.warning("⚠️ Please provide input based on the selected method.")
            st.stop()

        if st.session_state["selected_domain"] == "Finance" and section_choice == "Skills":
            st.session_state["generated_questions"] = sample(questions_from_excel, min(num_qs, len(questions_from_excel)))
        else:
            prompt = f"Ask {num_qs} direct and core-level {difficulty} interview questions related to {input_text}. Do not include intros or numbering."
            # Use a dedicated name so the global Whisper `model` (needed later for
            # transcription) is not clobbered.
            question_model = genai.GenerativeModel('gemini-1.5-pro-latest')
            response = question_model.generate_content([prompt, input_text])
            lines = response.text.strip().split("\n")
            questions = [q.strip("* ") for q in lines if q.strip()]
            st.session_state["generated_questions"] = questions[:num_qs]

        st.session_state["current_question_index"] = 0
        st.session_state["answers"] = []
        st.session_state["evaluation_feedback"] = ""
        st.session_state["recorded_text"] = ""
        st.session_state["response_captured"] = False
        st.session_state["timer_start"] = None
        st.session_state["show_summary"] = False
        st.session_state["question_played"] = False
        st.session_state["recording_complete"] = False
        st.rerun()

def get_ice_servers():
    """Use Twilio's TURN server, because Streamlit Community Cloud has changed
    its infrastructure and a WebRTC connection cannot be established without a
    TURN server now.
    We considered the Open Relay Project (https://www.metered.ca/tools/openrelay/) too,
    but it is not stable and hardly works, as reported in e.g.
    https://github.com/aiortc/aiortc/issues/832#issuecomment-1482420656
    See https://github.com/whitphx/streamlit-webrtc/issues/1213
    """
    # Ref: https://www.twilio.com/docs/stun-turn/api
    try:
        account_sid = os.environ["TWILIO_ACCOUNT_SID"]
        auth_token = os.environ["TWILIO_AUTH_TOKEN"]
    except KeyError:
        logger.warning(
            "Twilio credentials are not set. Falling back to a free STUN server from Google."
        )
        return [{"urls": ["stun:stun.l.google.com:19302"]}]

    client = Client(account_sid, auth_token)
    token = client.tokens.create()
    return token.ice_servers
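# get_ice_servers() pairs with the webrtc_streamer import above. A typical call
# (a hypothetical sketch here, since no streamer is actually wired up in this
# file) would look like:
#
#     webrtc_streamer(
#         key="mic",
#         mode=WebRtcMode.SENDONLY,
#         rtc_configuration={"iceServers": get_ice_servers()},
#         media_stream_constraints={"audio": True, "video": False},
#     )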

# === Main QA Interface ===
if st.session_state["generated_questions"]:
    idx = st.session_state["current_question_index"]
    if idx < len(st.session_state["generated_questions"]):
        question = st.session_state["generated_questions"][idx].lstrip("1234567890. ").strip()

        # Phase 0: play the question audio first and wait 5s before the countdown
        if not st.session_state.get("question_played"):
            st.session_state["question_audio_file"] = asyncio.run(generate_question_audio(question))
            st.session_state.update({
                "question_played": True,
                "question_start_time": time.time(),
                "record_phase": "audio_playing",
                "recorded_text": ""
            })

        st.markdown(f"**Q{idx + 1}:** {question}")
        st.audio(st.session_state["question_audio_file"], format="audio/mp3")

        now = time.time()
        elapsed = now - st.session_state.get("question_start_time", 0)

        if st.session_state["record_phase"] == "audio_playing":
            if elapsed < 5:
                st.markdown("<h4 class='timer-text'>🔊 Playing question audio... Please listen</h4>", unsafe_allow_html=True)
                time.sleep(1)
                st.rerun()
            else:
                st.session_state["record_phase"] = "waiting_to_start"
                st.session_state["question_start_time"] = time.time()
                st.rerun()

        elif st.session_state["record_phase"] == "waiting_to_start":
            remaining = 10 - int(elapsed)
            if remaining > 0:
                st.markdown(f"<h4 class='timer-text'>⏳ {remaining} seconds to click 'Start Recording'...</h4>", unsafe_allow_html=True)
                if st.button("🎙️ Start Recording"):
                    st.session_state.update({
                        "record_phase": "recording",
                        "timer_start": time.time(),
                        "recording_started": False
                    })
                    st.rerun()
                time.sleep(1)
                st.rerun()
            else:
                st.markdown("<div style='padding:10px; background:#fff8e1; border-left:5px solid orange; color:#212529;'>⚠️ <strong>No action detected.</strong> Automatically skipping to the next question...</div>", unsafe_allow_html=True)
                st.session_state["answers"].append({"question": question, "response": "[No response]"})
                st.session_state.update({
                    "record_phase": "idle",
                    "question_played": False,
                    "question_start_time": 0.0,
                    "current_question_index": idx + 1
                })
                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
                    evaluate_answers()
                    st.session_state["show_summary"] = True
                st.rerun()

        elif st.session_state["record_phase"] == "recording":
            remaining = 15 - int(now - st.session_state.get("timer_start", 0))
            if remaining > 0:
                st.markdown(f"<h4 class='timer-text'>🎙️ {remaining} seconds to answer...</h4>", unsafe_allow_html=True)

                audio_value = st.audio_input("🎤 Tap to record, then stop when done", key=f"audio_{idx}")
                if audio_value and "response_file" not in st.session_state:
                    wav_path = f"response_{idx}.wav"
                    with open(wav_path, "wb") as f:
                        f.write(audio_value.getbuffer())
                    st.session_state["response_file"] = wav_path
                    st.session_state["record_phase"] = "listening"
                    st.success("✅ Audio uploaded. You may now confirm your answer.")
                    st.audio(wav_path, format="audio/wav")

                    if st.button("⏹️ Confirm & Next"):
                        try:
                            with st.spinner("🧠 Transcribing your answer..."):
                                result = model.transcribe(st.session_state["response_file"])
                                transcript = result["text"].strip()
                                if not transcript:
                                    transcript = "[Transcription failed or empty]"
                        except Exception as e:
                            st.error(f"❌ Transcription error: {e}")
                            transcript = "[Transcription error]"

                        st.session_state["answers"].append({
                            "question": question,
                            "response_file": st.session_state["response_file"],
                            "response_text": transcript
                        })

                        # Advance to the next question (mirrors the "listening" branch below)
                        st.session_state.update({
                            "record_phase": "idle",
                            "recording_started": False,
                            "question_played": False,
                            "question_start_time": 0.0,
                            "current_question_index": idx + 1
                        })
                        st.session_state.pop("response_file", None)

                        if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
                            evaluate_answers()
                            st.session_state["show_summary"] = True
                        st.rerun()

                if elapsed > 15 and "response_file" not in st.session_state:
                    st.warning("⚠️ No audio captured. Moving to the next question.")
                    st.session_state["answers"].append({
                        "question": question,
                        "response": "[No response]"
                    })
                    st.session_state.update({
                        "record_phase": "idle",
                        "question_played": False,
                        "current_question_index": idx + 1
                    })
                    if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
                        evaluate_answers()
                        st.session_state["show_summary"] = True
                    st.rerun()

            else:
                st.markdown("<div style='padding:10px; background:#fff3e0; border-left:5px solid orange;'>⚠️ <strong>No response detected.</strong> Moving to the next question...</div>", unsafe_allow_html=True)
                st.session_state["answers"].append({"question": question, "response": "[No response]"})
                st.session_state.update({
                    "record_phase": "idle",
                    "recording_started": False,
                    "question_played": False,
                    "question_start_time": 0.0,
                    "current_question_index": idx + 1
                })
                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
                    evaluate_answers()
                    st.session_state["show_summary"] = True
                st.rerun()

        elif st.session_state["record_phase"] == "listening":
            st.success("🎧 Review your recorded response below:")
            st.audio(st.session_state["response_file"], format="audio/wav")

            if st.button("⏹️ Confirm & Next"):
                st.session_state["answers"].append({
                    "question": question,
                    "response_file": st.session_state["response_file"]
                })

                st.session_state.update({
                    "record_phase": "idle",
                    "recording_started": False,
                    "question_played": False,
                    "question_start_time": 0.0,
                    "current_question_index": idx + 1,
                    "audio_waiting": True
                })
                # Remove the key entirely: the recording phase above gates on
                # '"response_file" not in st.session_state', so leaving it set
                # to None would block the next recording.
                st.session_state.pop("response_file", None)

                if st.session_state["current_question_index"] == len(st.session_state["generated_questions"]):
                    evaluate_answers()
                    st.session_state["show_summary"] = True
                st.rerun()

# === Summary Display ===
if st.session_state.get("show_summary", False):
    st.subheader("📊 Complete Mock Interview Summary")

    # Fetch values from session state, providing defaults
    feedback_content_for_display = st.session_state.get('evaluation_feedback', "Evaluation details not available.")
    if not isinstance(feedback_content_for_display, str):
        feedback_content_for_display = str(feedback_content_for_display)

    # The max-score basis is the number of questions *generated* for the session
    num_qs_in_session = len(st.session_state.get("generated_questions", []))
    if num_qs_in_session == 0 and st.session_state.get("answers"):  # Fallback if answers exist but generated_questions is empty
        num_qs_in_session = len(st.session_state.answers)

    max_score_possible_for_session = num_qs_in_session * 5.0
    current_percentage_score = st.session_state.get('percentage_score', 0.0)
    current_overall_score = st.session_state.get('overall_score', 0.0)

    # Display the calculated score and percentage bar first, in a card
    st.markdown(f"""
    <div class='summary-card' style="margin-bottom: 20px;">
        <h4 style="color: #212529;">✅ <strong>Overall Score:</strong> {current_overall_score:.1f} / {max_score_possible_for_session:.1f}
            ({current_percentage_score:.1f}%)
        </h4>
        <div style='margin:10px 0; position:relative;'>
            <div style="background:#eee; border-radius:10px; overflow:hidden; height:30px; position:relative;">
                <div style="
                    width:{current_percentage_score}%;
                    background:#00c851; /* green for progress */
                    height:100%;
                    border-radius:10px 0 0 10px; /* keep the left radius for progress */
                    transition: width 0.4s ease-in-out;
                "></div>
                <div style="
                    position:absolute;
                    top:0;
                    left:0;
                    width:100%;
                    height:100%;
                    display:flex;
                    align-items:center;
                    justify-content:center;
                    font-weight:bold;
                    color: black !important; /* keep the text visible on green/grey */
                    font-size: 0.9rem;
                    user-select:none; /* prevent text selection */
                ">
                    {current_percentage_score:.1f}%
                </div>
            </div>
        </div>
    </div>
    """, unsafe_allow_html=True)

    # Display the detailed evaluation feedback text in a separate section
    st.markdown("---")
    st.markdown("<h4 style='color: #212529;'>Detailed Evaluation & Feedback from AI:</h4>", unsafe_allow_html=True)

    # Use a styled div for the feedback block to keep it readable;
    # replace newlines with <br> for proper HTML multiline display
    html_formatted_feedback = feedback_content_for_display.replace('\n', '<br>')
    st.markdown(f"""
    <div style="background-color: #ffffff; color: #212529; padding: 15px; border-radius: 8px; border: 1px solid #e0e0e0; margin-top: 10px; max-height: 500px; overflow-y: auto; white-space: normal; word-wrap: break-word;">
        {html_formatted_feedback}
    </div>
    """, unsafe_allow_html=True)

    st.markdown("---")  # Separator

    # Buttons for suggestions, download, practice
    cols_summary_buttons = st.columns([1, 1, 1])  # 3 columns for the buttons

    with cols_summary_buttons[0]:
        if st.button("💡 Get Improvement Suggestions", key="get_suggestions_btn_final", use_container_width=True):
            # Regenerate suggestions if not present or explicitly requested again
            generate_improvement_suggestions()  # This function handles st.info/st.success itself
            st.rerun()  # Rerun to show the expander or updated suggestions

    # Helper function to prepare the summary text for download
    def prepare_summary_for_download():
        download_text = "# GrillMaster Mock Interview Summary\n\n"
        download_text += f"**Selected Domain:** {st.session_state.get('selected_domain', 'N/A')}\n"
        dl_difficulty = st.session_state.get('difficulty_level_select', 'N/A')
        download_text += f"**Difficulty Level:** {dl_difficulty}\n"

        num_q_for_max_score = len(st.session_state.get("generated_questions", st.session_state.get("answers", [])))
        max_s_for_dl = num_q_for_max_score * 5.0

        download_text += f"**Calculated Overall Score:** {st.session_state.get('overall_score', 0.0):.1f} / {max_s_for_dl:.1f} ({st.session_state.get('percentage_score', 0.0):.1f}%)\n\n"

        download_text += "## Questions & Candidate's Answers:\n"
        num_answers_actually_given = len(st.session_state.get("answers", []))
        for i in range(num_q_for_max_score):
            question_text_dl = st.session_state.generated_questions[i] if i < len(st.session_state.generated_questions) else "Question text not found"
            answer_text_dl = "[No answer recorded]"
            if i < num_answers_actually_given:
                answer_text_dl = str(st.session_state.answers[i].get('response', '[No response provided]'))

            download_text += f"**Question {i+1}:** {question_text_dl}\n"
            download_text += f"**Your Answer {i+1}:** {answer_text_dl}\n\n"

        download_text += "\n## AI Evaluation Details (Includes Parsed Scores and Qualitative Feedback):\n"
        # st.session_state.evaluation_feedback is already pre-formatted at this point
        download_text += st.session_state.get('evaluation_feedback', "No AI evaluation available.")
        download_text += "\n\n"

        if st.session_state.get("improvement_suggestions_generated", False) and st.session_state.get("improvement_suggestions"):
            download_text += "\n## Detailed Improvement Suggestions from AI:\n"
            download_text += st.session_state.get('improvement_suggestions', "No improvement suggestions were generated.")

        return download_text.encode('utf-8')

    with cols_summary_buttons[1]:
        summary_bytes_dl_final = prepare_summary_for_download()
        st.download_button(
            label="💾 Download Full Summary",
            data=summary_bytes_dl_final,
            file_name=f"GrillMaster_Summary_{st.session_state.get('selected_domain', 'General')}_{time.strftime('%Y%m%d_%H%M')}.md",
            mime="text/markdown",
            key="download_summary_final_btn",
            use_container_width=True
        )

    # Expander for detailed suggestions, shown once generated
    if st.session_state.get("improvement_suggestions_generated", False) and st.session_state.get("improvement_suggestions"):
        with st.expander("🔍 View Detailed Improvement Suggestions", expanded=True):  # Default to expanded once generated
            st.markdown(st.session_state.improvement_suggestions, unsafe_allow_html=True)  # The LLM may use markdown

    # Conditional button for low scores
    if current_percentage_score < 50.0:
        st.warning(f"Your score is {current_percentage_score:.1f}%. Keep practicing! You can also reset all settings to try a new domain or difficulty.")
        if st.button("🔁 Practice Again & Reset All Settings", key="practice_full_reset_final_btn", use_container_width=True):
            # Clear all session-state keys and re-initialize to defaults
            keys_to_fully_clear = list(st.session_state.keys())
            for key_to_del_full in keys_to_fully_clear:
                del st.session_state[key_to_del_full]