Imarticuslearning committed on
Commit b8a9a76 · verified
1 Parent(s): 40087b0

Update app.py

Files changed (1)
  1. app.py +548 -262

app.py CHANGED
@@ -1,16 +1,20 @@
1
-
2
  import os
3
  import re
4
  import time
5
  from dotenv import load_dotenv
6
  import streamlit as st
7
- from gtts import gTTS
8
  import PyPDF2
9
  import google.generativeai as genai
10
  import speech_recognition as sr
11
  from random import sample
12
  import random
13
  from html import escape
 
 
14
 
15
  # ✅ MUST be the first Streamlit command
16
  st.set_page_config(page_title="GrillMaster", layout="wide")
@@ -19,7 +23,6 @@ st.set_page_config(page_title="GrillMaster", layout="wide")
19
  load_dotenv()
20
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
21
 
22
-
23
  # Initialize session state
24
  for key, default in {
25
  "generated_questions": [],
@@ -77,257 +80,420 @@ def get_questions(prompt, input_text, num_questions=3, max_retries=10):
77
 
78
  return new_questions
79
 
80
-
81
- # def evaluate_answers():
82
- # model = genai.GenerativeModel('gemini-1.5-pro-latest')
83
- # prompt = """
84
- # You are an expert interview evaluator. Assess responses based on:
85
- # - Conceptual Understanding
86
- # - Communication Skills
87
- # - Clarity & Depth of Explanation
88
- # - Use of Real-World Examples
89
- # - Logical Flow
90
-
91
- # Provide a score (out of 10) and an evaluation summary.
92
-
93
- # **Format:**
94
- # **Overall Score:** x/10
95
- # **Evaluation Summary:**
96
- # - Concept Understanding: .
97
- # - Communication: .
98
- # - Depth of Explanation: .
99
- # - Examples: .
100
- # - Logical Flow: .
101
- # """
102
- # candidate_responses = "\n\n".join(
103
- # [f"Q: {entry['question']}\nA: {entry['response']}" for entry in st.session_state["answers"]]
104
- # )
105
- # full_prompt = f"{prompt}\n\nCandidate Responses:\n{candidate_responses}"
106
- # response = model.generate_content(full_prompt)
107
- # st.session_state["evaluation_feedback"] = response.text.strip()
108
- # match = re.search(r"\*\*Overall Score:\*\* (\d+)/10", response.text)
109
- # st.session_state["overall_score"] = int(match.group(1)) if match else 0
110
- # st.session_state["percentage_score"] = st.session_state["overall_score"] * 10
111
-
112
- import asyncio
113
- import edge_tts
114
-
115
- import re
116
- import tempfile
117
- import asyncio
118
- import edge_tts
119
-
120
  async def generate_question_audio(question, voice="en-IE-EmilyNeural"):
121
  clean_question = re.sub(r'[^A-Za-z0-9.,?! ]+', '', question)
122
  tts = edge_tts.Communicate(text=clean_question, voice=voice)
123
  with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
124
  await tts.save(tmp_file.name)
125
  return tmp_file.name
126
-
127
-
128
- # === KEYWORD-BASED SCORING LOGIC ===
129
-
130
- KEYWORDS = {
131
- "Analytics": {
132
- "Python": ["loops", "list", "dictionary", "function", "pandas", "numpy"],
133
- "SQL": ["join", "group by", "select", "where", "index", "foreign key"],
134
- "Machine Learning": ["model", "features", "training", "accuracy", "regression"]
 
 
 
 
135
  },
136
- "Finance": {
137
- "Fund Accounting": ["NAV", "mutual fund", "reconciliation", "journal entry"],
138
- "AML/KYC": ["customer", "verification", "risk", "compliance", "money laundering"]
139
  },
140
- "Soft Skills": {
141
- "default": ["communication", "teamwork", "problem solving", "motivation", "adaptability"]
 
 
142
  }
143
  }
144
 
145
- def score_answer(answer_text, domain, skill):
146
- answer = answer_text.strip().lower()
147
- if answer in ["", "[no response]", "no response", "skipped"]:
148
- return 0.0
149
- if any(phrase in answer for phrase in ["don't know", "not sure", "unaware"]):
150
- return 0.0
151
 
152
- keywords = KEYWORDS.get(domain, {}).get(skill, KEYWORDS["Soft Skills"]["default"])
153
- match_count = sum(1 for kw in keywords if kw in answer)
154
- match_ratio = match_count / len(keywords) if keywords else 0
155
 
156
- if match_ratio == 0:
157
- return 1.5
158
- elif match_ratio <= 0.5:
159
- return 3.0
160
- else:
161
- return 5.0
162
 
163
- # Evaluate candidate answers - YOUR FUNCTION
164
-
165
 
166
-
167
- def evaluate_answers():
168
  model = genai.GenerativeModel('gemini-1.5-pro-latest')
169
  difficulty_level = st.session_state.get("difficulty_level_select", "Beginner")
170
  level_string = difficulty_level.lower()
171
 
172
- # --- Start: Check for all no-responses ---
173
- all_no_response = True
174
  if not st.session_state.get("answers"):
175
- all_no_response = True
176
- else:
 
 
 
 
177
  for entry in st.session_state["answers"]:
178
  response_text = str(entry.get('response', '')).strip().lower()
179
  no_response_placeholders = [
180
  "", "[no response provided]", "[no response - timed out]",
181
  "[no response]", "no response", "[could not understand audio]",
182
- "[no clear response recorded]"
 
 
183
  ]
184
  if response_text not in no_response_placeholders:
185
- all_no_response = False
186
  break
187
 
188
- if all_no_response:
189
- st.session_state["evaluation_feedback"] = (
190
- "**Overall Score:** 0/10\n"
191
- "**Evaluation Summary:**\n"
192
- "- Concept Understanding: N/A - No response provided.\n"
193
- "- Communication: N/A - No response provided.\n"
194
- "- Depth of Explanation: N/A - No response provided.\n"
195
- "- Examples: N/A - No response provided.\n"
196
- "- Logical Flow: N/A - No response provided.\n\n"
197
- "The candidate did not provide any meaningful answers."
198
- )
199
- st.session_state["overall_score"] = 0
200
- st.session_state["percentage_score"] = 0
 
 
201
  return
202
- # --- End: Check for all no-responses ---
203
-
204
- base_assessment_criteria = """
205
- Assess responses based on:
206
- - Conceptual Understanding (effort and relevance more than perfect accuracy)
207
- - Communication Clarity (can the core idea be understood?)
208
- - Depth of Explanation (relative to expected level)
209
- - Use of Examples (if any, and if appropriate for the level)
210
- - Logical Flow (is there a basic structure or train of thought?)
211
- """
212
 
213
- if level_string == "beginner":
214
- level_specific_instructions = """
215
- You are an extremely understanding and encouraging interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to build confidence.
216
- **Scoring Guidelines for Beginners:**
217
- - Be VERY lenient. Focus on ANY sign of understanding or effort.
218
- - A one-liner, if relevant, deserves a good score (e.g., 6-8/10). Partial correctness with effort: 7-9/10. Generally correct but brief: 8-10/10.(Just doubles the score)
219
- - Only score 0/10 if completely irrelevant or no attempt.
220
- - If all answers are empty/placeholders (like '[No response]'), overall score MUST be 0/10.
221
- Provide highly positive, motivating feedback.
 
 
 
 
222
  """
223
- elif level_string == "intermediate":
224
- level_specific_instructions = """
225
- You are a supportive evaluator for an **INTERMEDIATE** candidate. Expect reasonable conceptual grasp.
226
- **Scoring Guidelines for Intermediate:**
227
- - 7–10: Mostly correct, clear, good understanding.
228
- - 4–6: Partially correct, or needs clarity/depth.
229
- - 1–3: Largely incorrect or irrelevant.
230
- - Overall score 0/10 if all responses are empty.
231
- Provide balanced feedback.
 
 
 
 
232
  """
233
- else: # Advanced
234
- level_specific_instructions = """
235
- You are a discerning evaluator for an **ADVANCED** candidate. Expect accuracy and depth.
236
- **Scoring Guidelines for Advanced:**
237
- - 8–10: Accurate, comprehensive, deep understanding.
238
- - 5–7: Generally correct but lacks some depth/precision.
239
- - 1–4: Significant inaccuracies or superficial.
240
- - Overall score 0/10 if all responses are empty.
241
- Provide precise feedback.
242
  """
243
-
244
- evaluation_prompt_template = f"""
245
- {level_specific_instructions}
246
- {base_assessment_criteria}
247
-
248
- **YOUR RESPONSE MUST STRICTLY START WITH THE OVERALL SCORE ON THE VERY FIRST LINE.**
249
- Follow this exact format for your entire output:
250
-
251
- *Overall Score:* [score]/10
252
- *Evaluation Summary:*
253
- - Concept Understanding: [Your feedback here]
254
- - Communication: [Your feedback here]
255
- - Depth of Explanation: [Your feedback here]
256
- - Examples: [Your feedback here]
257
- - Logical Flow: [Your feedback here]
258
- [Any additional overall encouraging remarks can optionally follow here]
259
-
260
- The [score] must be a number (e.g., 7 or 7.5) between 0 and 10.
261
- """
262
-
263
- candidate_responses_formatted = "\n\n".join(
264
- [f"Q: {entry['question']}\nA: {str(entry.get('response', '[No response provided]'))}" for entry in st.session_state["answers"]]
265
- )
266
- full_prompt_for_evaluation = f"{evaluation_prompt_template}\n\nCandidate Responses:\n{candidate_responses_formatted}"
267
-
268
- try:
269
- response_content = model.generate_content(full_prompt_for_evaluation)
270
- st.session_state["evaluation_feedback"] = response_content.text.strip()
271
-
272
- extracted_text_for_scoring = response_content.text.strip()
273
- print("--- LLM Output for Score Extraction (evaluate_answers) ---")
274
- print(extracted_text_for_scoring)
275
- print("----------------------------------------------------------")
276
 
277
- overall_score_val = 0.0
 
 
 
 
278
 
279
- # Pattern 1: More flexible, looks for "Overall Score" then captures number/10
280
- score_pattern_flexible = r"(?i).*Overall Score[\s:]*(\d+(?:\.\d+)?)\s*/\s*10"
281
- score_match = re.search(score_pattern_flexible, extracted_text_for_scoring)
282
 
283
- if score_match:
284
- try:
285
- score_text = score_match.group(1)
286
- print(f"Flexible Pattern Matched! Score text: '{score_text}', Full context: '{score_match.group(0)}'")
287
- overall_score_val = float(score_text)
288
- if overall_score_val.is_integer():
289
- overall_score_val = int(overall_score_val)
290
- print(f"Parsed score value: {overall_score_val}")
291
- except ValueError:
292
- st.warning(f"Flexible pattern matched, but could not parse '{score_text}' as a number. Defaulting score to 0.")
293
- print(f"ValueError during parsing (flexible pattern). Score text: '{score_text}'")
294
- overall_score_val = 0.0
295
- else:
296
- # Fallback Pattern: Simplest possible X/10 if "Overall Score" line completely missing/mangled
297
- score_pattern_fallback = r"(\d+(?:\.\d+)?)\s*/\s*10"
298
- # Search for fallback only if primary pattern fails
299
- print(f"Flexible pattern ('{score_pattern_flexible}') did not match. Trying fallback.")
300
- score_match_fallback = re.search(score_pattern_fallback, extracted_text_for_scoring)
301
- if score_match_fallback:
302
  try:
303
- score_text = score_match_fallback.group(1)
304
- print(f"Fallback Pattern Matched! Score text: '{score_text}', Full context: '{score_match_fallback.group(0)}'")
305
- overall_score_val = float(score_text)
306
- if overall_score_val.is_integer():
307
- overall_score_val = int(overall_score_val)
308
- print(f"Parsed score value (from fallback): {overall_score_val}")
309
- st.warning("Used fallback regex to find score. LLM format for 'Overall Score' line was unexpected.")
310
  except ValueError:
311
- st.warning(f"Fallback pattern matched, but could not parse '{score_text}' as a number. Score set to 0.")
312
- print(f"ValueError during parsing (fallback pattern). Score text: '{score_text}'")
313
- overall_score_val = 0.0
314
- else:
315
- st.warning(f"Could not find any 'X/10' score pattern in the LLM response. Score defaulted to 0. LLM Output (first 300 chars):\n{extracted_text_for_scoring[:300]}...")
316
- print(f"All score patterns failed. LLM output did not contain a recognizable score.")
317
- overall_score_val = 0.0
318
-
319
- st.session_state["overall_score"] = overall_score_val
320
- st.session_state["percentage_score"] = float(overall_score_val) * 10
321
-
322
- except Exception as e:
323
- st.error(f"An error occurred during evaluation or score parsing: {e}")
324
- st.session_state["evaluation_feedback"] = (
325
- f"Could not evaluate answers due to an error: {e}. "
326
- f"LLM output might be missing or malformed."
327
- )
328
- st.session_state["overall_score"] = 0
329
- st.session_state["percentage_score"] = 0
330
-
 
 
331
  # --- Prompts for Question Generation ---
332
  BEGINNER_PROMPT = """
333
  You are a friendly mock interview trainer conducting a **Beginner-level** spoken interview in the domain of **{domain}**.
@@ -387,7 +553,7 @@ Ensure the questions are clear, to the point, and suitable for a {difficulty_lev
387
  ❌ Avoid vague or open-ended statements—each question should be concise and specific.
388
  """
389
 
390
-
391
  # UI styles
392
  st.markdown("""
393
  <style>
@@ -511,17 +677,8 @@ if st.session_state["selected_domain"] == "Soft Skills":
511
  st.sidebar.subheader("Select Interview Domain:")
512
  for domain in ["Analytics", "Finance", "Soft Skills"]:
513
  if st.sidebar.button(domain):
 
514
  st.session_state["selected_domain"] = domain
515
- st.session_state["generated_questions"] = []
516
- st.session_state["current_question_index"] = 0
517
- st.session_state["answers"] = []
518
- st.session_state["evaluation_feedback"] = ""
519
- st.session_state["recorded_text"] = ""
520
- st.session_state["response_captured"] = False
521
- st.session_state["timer_start"] = None
522
- st.session_state["show_summary"] = False
523
- st.session_state["question_played"] = False
524
- st.session_state["recording_complete"] = False
525
  st.rerun()
526
 
527
  if not st.session_state["selected_domain"]:
@@ -550,28 +707,66 @@ else:
550
  input_text = st.sidebar.text_area("Paste Job Description:")
551
 
552
  elif section_choice == "Skills":
553
- # Define available skills for each domain
554
- skills = {
555
- "Analytics": ["Python", "SQL", "Machine Learning", "Statistics", "Business Analytics"],
556
- "Finance": ["Fund Accounting", "AML/KYC", "Derivatives"]
 
 
 
 
557
  }
558
- skill_list = skills.get(st.session_state["selected_domain"], [])
559
- if skill_list:
560
- selected_skill = st.sidebar.selectbox("Select a Skill:", skill_list, key="skill_select")
561
- input_text = selected_skill
562
- st.sidebar.markdown(f"✅ Selected Skill: **{selected_skill}**") # Debug display
 
563
 
564
  if st.sidebar.button("Generate Questions"):
565
  if not input_text.strip():
566
  st.warning("⚠️ Please provide input based on the selected method.")
567
  st.stop()
568
 
569
- prompt = f"Ask {num_qs} direct and core-level {difficulty} interview questions related to {input_text}. Do not include intros or numbering."
570
- model = genai.GenerativeModel('gemini-1.5-pro-latest')
571
- response = model.generate_content([prompt, input_text])
572
- lines = response.text.strip().split("\n")
573
- questions = [q.strip("* ") for q in lines if q.strip()]
574
- st.session_state["generated_questions"] = questions[:num_qs]
 
 
575
  st.session_state["current_question_index"] = 0
576
  st.session_state["answers"] = []
577
  st.session_state["evaluation_feedback"] = ""
@@ -696,30 +891,39 @@ if st.session_state["generated_questions"]:
696
  st.rerun()
697
 
698
 
 
 
699
  # === Summary Display ===
700
  if st.session_state.get("show_summary", False):
701
- # st.balloons()
702
  st.subheader("📊 Complete Mock Interview Summary")
703
 
704
- feedback_content = st.session_state.get('evaluation_feedback', "Evaluation not yet available.")
705
- if not isinstance(feedback_content, str):
706
- feedback_content = str(feedback_content)
 
707
 
708
- # Escape HTML special characters from the feedback for security
709
- escaped_feedback_for_html = escape(feedback_content)
710
- # Replace newlines with <br> for HTML display within markdown
711
- formatted_feedback_for_markdown = escaped_feedback_for_html.replace("\n", "<br>")
 
 
712
 
 
713
  st.markdown(f"""
714
- <div class='summary-card'>
715
- <h4 style="color: #212529;">✅ <strong>Overall Score (Model Evaluation):</strong></h4>{st.session_state.get('percentage_score', 0):.0f}% ({st.session_state.get('overall_score',0)}/10)</h4>
 
 
716
  <div style='margin:10px 0; position:relative;'>
717
- <div style="color: #212529; background-color: #00c851; border-radius:10px; overflow:hidden; height:30px; position:relative;">
718
  <div style="
719
- width:{st.session_state.get('percentage_score', 0)}%;
720
- background:#00c851;
721
  height:100%;
722
- border-radius:10px 0 0 10px;
723
  transition: width 0.4s ease-in-out;
724
  "></div>
725
  <div style="
@@ -732,17 +936,99 @@ if st.session_state.get("show_summary", False):
732
  align-items:center;
733
  justify-content:center;
734
  font-weight:bold;
735
- color: black !important;
736
  font-size: 0.9rem;
737
- user-select:none;
738
  ">
739
- {st.session_state.get('percentage_score', 0):.0f}%
740
  </div>
741
  </div>
742
  </div>
743
- <h4 style="color: #212529;">Detailed Evaluation:</h4>
744
- <div style="color: #212529; background-color: #ffffff; padding: 10px; border-radius: 5px; border: 1px solid #eee; margin-top: 5px; white-space: pre-wrap; word-wrap: break-word; max-height: 400px; overflow-y: auto;">
745
- {formatted_feedback_for_markdown}
746
- </div>
747
  </div>
748
  """, unsafe_allow_html=True)
 
 
 
 
1
  import os
2
  import re
3
  import time
4
  from dotenv import load_dotenv
5
  import streamlit as st
 
6
  import PyPDF2
7
  import google.generativeai as genai
8
  import speech_recognition as sr
9
  from random import sample
10
  import random
11
  from html import escape
12
+ import asyncio
13
+ import edge_tts
14
+ import pandas as pd
15
+ import tempfile
16
+ import traceback
17
+
18
 
19
  # ✅ MUST be the first Streamlit command
20
  st.set_page_config(page_title="GrillMaster", layout="wide")
 
23
  load_dotenv()
24
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
25
 
 
26
  # Initialize session state
27
  for key, default in {
28
  "generated_questions": [],
 
80
 
81
  return new_questions
82
 
 
 
 
 
83
  async def generate_question_audio(question, voice="en-IE-EmilyNeural"):
84
  clean_question = re.sub(r'[^A-Za-z0-9.,?! ]+', '', question)
85
  tts = edge_tts.Communicate(text=clean_question, voice=voice)
86
  with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
87
  await tts.save(tmp_file.name)
88
  return tmp_file.name
89
+
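For context, a minimal sketch of how this coroutine can be driven from the synchronous Streamlit script; the sample question, the asyncio.run call and the st.audio playback are illustrative and not part of the diff:

import asyncio
import streamlit as st

question = "Explain the difference between a list and a tuple in Python."
audio_path = asyncio.run(generate_question_audio(question))  # run the edge-tts coroutine to completion
with open(audio_path, "rb") as f:
    st.audio(f.read(), format="audio/mp3")  # play the generated question audio in the app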
90
+ ########################################///////////////////////////////////////////////////#########################################
91
+
92
+ # HR_PARAMETERS_CONFIG - Updated based on your latest Excel sheet (input_file_0.png)
93
+ # These are the parameters that can be judged from audio/text responses.
94
+ HR_PARAMETERS_CONFIG = {
95
+ "Voice Modulation": { # Non-Verbal Cues
96
+ "weight_original": 5,
97
+ "rubric": "1-5 (5=Good pace/tone, conversational; 3=Sounds Scripted/Slight Monotony; 1=Flat tone/Robotic)"
98
+ },
99
+ "Confidence": { # Personality
100
+ "weight_original": 7,
101
+ "rubric": "1-5 (5=Bold & Confident throughout; 3=Confused/Nervous in parts; 1=Extremely nervous/Timid)"
102
+ },
103
+ "Attitude": { # Personality
104
+ "weight_original": 3,
105
+ "rubric": "1-5 (5=Assertive, Positive, Open; 3=Neutral/Mildly defensive; 1=Aggressive/Pessimistic/Dismissive)"
106
+ },
107
+ "Flow & Fluency": { # Articulation
108
+ "weight_original": 20,
109
+ "rubric": "1-5 (5=Excellent Fluency, Spontaneous; 3=Initially struggles, then manages/Takes some time; 1=Many fillers/Pauses/Dead silence)"
110
+ },
111
+ "Structured thoughts & Clarity": { # Articulation
112
+ "weight_original": 10,
113
+ "rubric": "1-5 (5=Organized, Crisp, Coherent thoughts, e.g. STAR method; 3=Ideas are okay but clarity/structure could be better; 1=Incoherent/Rambling/Struggles to put thoughts into words)"
114
+ },
115
+ "Sentence Formation": { # Language Skills
116
+ "weight_original": 20,
117
+ "rubric": "1-5 (5=Good Clarity, Variety in sentence structure, Good Vocab; 3=Decent communication, might find some words difficult; 1=Talks in fragments/one-liners, Hard to understand)"
118
  },
119
+ "Basics of Grammar + SVA": { # Language Skills (SVA = Subject-Verb Agreement)
120
+ "weight_original": 10,
121
+ "rubric": "1-5 (5=Good Command over Language, Minimal errors; 3=Average communicator, some errors but understandable; 1=Makes a lot of Grammatical Errors impacting clarity)"
122
  },
123
+ "Persuasiveness": { # Rapport Building
124
+ "weight_original": 3,
125
+ "rubric": "1-5 (5=Impactful, Convincing Answers, Connects with interviewer; 3=Average or Common Answers; 1=Lacks Presence of Mind/No connection)"
126
+ },
127
+ "Quality of Answers": { # Rapport Building
128
+ "weight_original": 7,
129
+ "rubric": "1-5 (5=Handles questions well, Relevant & Thoughtful Answers, Asks good questions; 3=Very Generic Answers; 1=Vague/Lacks Depth/Shallow/Irrelevant)"
130
  }
131
  }
132
 
133
+ # Calculate total original weight for normalization
134
+ TOTAL_ORIGINAL_WEIGHT_HR = sum(param_data["weight_original"] for param_data in HR_PARAMETERS_CONFIG.values()) # Should be 85
 
 
135
 
136
+ # Add normalized weights to the config for calculating score out of 100
137
+ for param in HR_PARAMETERS_CONFIG:
138
+ HR_PARAMETERS_CONFIG[param]["weight_normalized"] = (HR_PARAMETERS_CONFIG[param]["weight_original"] / TOTAL_ORIGINAL_WEIGHT_HR) * 100
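A quick sketch of the arithmetic this normalization implies, using the "Flow & Fluency" weight from the config above (the 4/5 parameter score is only an example):

weight_original = 20                                                # "Flow & Fluency" raw weight
total_original_weight = 85                                          # sum of all raw weights in HR_PARAMETERS_CONFIG
weight_normalized = weight_original / total_original_weight * 100   # ~23.5 of the 100 available points
contribution = (4 / 5.0) * weight_normalized                        # a 4/5 score adds ~18.8 points
print(round(weight_normalized, 1), round(contribution, 1))          # -> 23.5 18.8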
139
 
 
 
140
 
141
+ ########################################///////////////////////////////////////////////////#########################################
142
+ # Summary of improvement (function)
143
 
144
+ def generate_improvement_suggestions():
 
145
  model = genai.GenerativeModel('gemini-1.5-pro-latest')
146
  difficulty_level = st.session_state.get("difficulty_level_select", "Beginner")
147
  level_string = difficulty_level.lower()
148
 
 
 
149
  if not st.session_state.get("answers"):
150
+ st.session_state.improvement_suggestions = "No answers were recorded to generate improvement suggestions."
151
+ return
152
+
153
+ # Prepare the context for the LLM
154
+ qa_context = []
155
+ for i, entry in enumerate(st.session_state["answers"]):
156
+ qa_context.append(
157
+ f"Question {i+1}: {entry['question']}\n"
158
+ f"Candidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
159
+ )
160
+ full_qa_context = "\n\n".join(qa_context)
161
+
162
+ initial_evaluation_feedback = st.session_state.get("evaluation_feedback", "Initial evaluation not available.")
163
+
164
+ # Remove any previous "Total Calculated Score..." line from the initial feedback
165
+ # to avoid confusing the LLM when it sees it as part of the context.
166
+ initial_evaluation_lines = initial_evaluation_feedback.splitlines()
167
+ cleaned_initial_evaluation = "\n".join(
168
+ line for line in initial_evaluation_lines if not line.strip().startswith("**Total Calculated Score:**")
169
+ )
170
+
171
+
172
+ improvement_prompt_template = """
173
+ You are an expert interview coach. You have the following information about a candidate's mock interview:
174
+ - Candidate's Level: {level_string}
175
+ - Questions Asked and Candidate's Answers:
176
+ {full_qa_context}
177
+ - Initial Evaluation Feedback Provided to Candidate:
178
+ ---
179
+ {cleaned_initial_evaluation}
180
+ ---
181
+
182
+ Based on all this information, your task is to provide DETAILED and CONSTRUCTIVE suggestions for each question to help the candidate improve. Be supportive and encouraging.
183
+
184
+ For EACH question, please provide:
185
+ 1. **How to Improve This Answer:** Specific, actionable advice on what the candidate could have added, clarified, or approached differently to make their answer better for their {level_string} level. Focus on 1-2 key improvement points.
186
+ 2. **Hints for an Ideal Answer:** Briefly mention 2-3 key concepts, terms, or elements that a strong answer (appropriate for their {level_string} level) would typically include. DO NOT provide a full model answer, just hints and pointers.
187
+
188
+ Keep the tone positive and focused on learning.
189
+
190
+ Structure your response clearly for each question. Example for one question:
191
+
192
+ ---
193
+ **Regarding Question X: "[Original Question Text Here]"**
194
+
195
+ *How to Improve This Answer:*
196
+ [Your specific suggestion 1 for improvement...]
197
+ [Your specific suggestion 2 for improvement...]
198
+
199
+ *Hints for an Ideal Answer (Key Points to Consider):*
200
+ - Hint 1 or Key concept 1
201
+ - Hint 2 or Key concept 2
202
+ - Hint 3 or Key element 3 (optional)
203
+ ---
204
+ (Repeat this structure for all questions)
205
+ """
206
+
207
+ formatted_improvement_prompt = improvement_prompt_template.format(
208
+ level_string=level_string,
209
+ full_qa_context=full_qa_context,
210
+ cleaned_initial_evaluation=cleaned_initial_evaluation
211
+ )
212
+
213
+ try:
214
+ st.info("🤖 Generating detailed improvement suggestions... Please wait.")
215
+ response = model.generate_content(formatted_improvement_prompt)
216
+ st.session_state.improvement_suggestions = response.text.strip()
217
+ st.session_state.improvement_suggestions_generated = True
218
+ st.success("Detailed suggestions generated!")
219
+ except Exception as e:
220
+ st.error(f"Error generating improvement suggestions: {e}")
221
+ st.session_state.improvement_suggestions = f"Could not generate suggestions due to an error: {e}"
222
+ st.session_state.improvement_suggestions_generated = False
223
+
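To make the context-cleaning step above concrete, a standalone sketch of the same line filter applied to an invented feedback string:

feedback = "**Total Calculated Score:** 12.0 / 15.0 (80.0%)\n**Overall Evaluation Summary:**\n- Communication: Clear and concise."
cleaned = "\n".join(
    line for line in feedback.splitlines()
    if not line.strip().startswith("**Total Calculated Score:**")  # drop the score line before re-prompting
)
print(cleaned)  # only the qualitative lines remain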
224
+ ########################################///////////////////////////////////////////////////#########################################
225
+
226
+ # Evaluate candidate answers - YOUR FUNCTION
227
+
228
+
229
+
230
+ def evaluate_answers():
231
+ model = genai.GenerativeModel('gemini-1.5-pro-latest')
232
+ # difficulty_level_select is the key for the difficulty selectbox in your sidebar
233
+ difficulty_level = st.session_state.get("difficulty_level_select", "Beginner")
234
+ level_string = difficulty_level.lower()
235
+ num_answered_questions = len(st.session_state.get("answers", []))
236
+
237
+ # Reset improvement suggestions flag when re-evaluating
238
+ st.session_state.improvement_suggestions_generated = False
239
+ st.session_state.improvement_suggestions = ""
240
+
241
+ meaningful_answers_exist = False
242
+ if st.session_state.get("answers"):
243
  for entry in st.session_state["answers"]:
244
  response_text = str(entry.get('response', '')).strip().lower()
245
  no_response_placeholders = [
246
  "", "[no response provided]", "[no response - timed out]",
247
  "[no response]", "no response", "[could not understand audio]",
248
+ "[no clear response recorded]", "[no action - timed out before recording]",
249
+ "[no speech detected in recording time]", "[no speech recorded - time up]",
250
+ "[recording stopped manually, possibly empty]",
251
+ "[no action - did not start recording]",
252
+ "[no speech detected in recording phase]"
253
  ]
254
  if response_text not in no_response_placeholders:
255
+ meaningful_answers_exist = True
256
  break
257
 
258
+ if not meaningful_answers_exist:
259
+ no_answer_feedback_qualitative = "No meaningful answers were provided for evaluation.\n\n"
260
+ if st.session_state.selected_domain == "Soft Skills":
261
+ hr_params_na = "\n".join([f"- {param}: 0/5" for param in HR_PARAMETERS_CONFIG.keys()])
262
+ no_answer_feedback = (
263
+ "No meaningful answers were provided for evaluation.\n\n"
264
+ f"**Parameter Scores (1-5):**\n{hr_params_na}\n\n"
265
+ "**Overall Qualitative Feedback:**\nCandidate did not provide responses to evaluate soft skills."
266
+ )
267
+ st.session_state["hr_parameter_scores_dict"] = {param: 0.0 for param in HR_PARAMETERS_CONFIG.keys()} # Store zeroed scores
268
+ else: # Non-HR domains
269
+ no_answer_feedback = (
270
+ "No meaningful answers were provided.\n"
271
+ "**Total Calculated Score:** 0.0 / 0.0 (0.0%)\n\n" # Placeholder for non-HR if no answers
272
+ "**Overall Evaluation Summary:** N/A"
273
+ )
274
+ st.session_state["evaluation_feedback"] = no_answer_feedback
275
+ st.session_state["overall_score"] = 0.0
276
+ st.session_state["percentage_score"] = 0.0
277
  return
 
 
278
 
279
+ # --- BRANCHING FOR HR (SOFT SKILLS) VS OTHER DOMAINS ---
280
+ if st.session_state.selected_domain == "Soft Skills":
281
+ hr_prompt_parameter_list = ""
282
+ for param, config in HR_PARAMETERS_CONFIG.items():
283
+ hr_prompt_parameter_list += f"- **{param}:** {config['rubric']}\n"
284
+
285
+ hr_prompt_template = f"""
286
+ You are an experienced HR interview evaluator assessing a candidate's soft skills based on their answers to interview questions.
287
+ The candidate's performance across ALL answers should inform your scores for the following parameters.
288
+
289
+ **Parameters to Score (Assign a score from 1 to 5 for each):**
290
+ {hr_prompt_parameter_list}
291
+
292
+ After providing a score (1-5) for each of the above parameters, also write an **Overall Qualitative Feedback** section.
293
+ This section should summarize the candidate's general soft skill strengths and areas for improvement, based on their communication, engagement, and professionalism throughout the interview.
294
+
295
+ **REQUIRED OUTPUT FORMAT (Strictly Adhere):**
296
+
297
+ **Parameter Scores (1-5):**
298
+ Voice Modulation: [score]
299
+ Confidence: [score]
300
+ Attitude: [score]
301
+ Flow & Fluency: [score]
302
+ Structured thoughts & Clarity: [score]
303
+ Sentence Formation: [score]
304
+ Basics of Grammar + SVA: [score]
305
+ Persuasiveness: [score]
306
+ Quality of Answers: [score]
307
+
308
+ **Overall Qualitative Feedback:**
309
+ [Your holistic qualitative feedback here. Be encouraging and constructive.]
310
  """
311
+ candidate_responses_formatted_hr = "\n\n".join(
312
+ [f"Question {i+1}: {entry['question']}\nCandidate's Answer {i+1}: {str(entry.get('response', '[No response provided]'))}"
313
+ for i, entry in enumerate(st.session_state["answers"])]
314
+ )
315
+ full_prompt_for_hr_evaluation = f"{hr_prompt_template}\n\nCandidate's Interview Answers (Consider all of these for holistic parameter scoring):\n{candidate_responses_formatted_hr}"
316
+
317
+ try:
318
+ response_content = model.generate_content(full_prompt_for_hr_evaluation)
319
+ full_llm_response_text = response_content.text.strip()
320
+
321
+ print("--- LLM Output for HR Score Extraction ---")
322
+ print(full_llm_response_text)
323
+ print("-----------------------------------------")
324
+
325
+ hr_parameter_scores_parsed_dict = {} # To store parsed scores for each HR param
326
+ total_weighted_score_percentage = 0.0
327
+
328
+ for param_name_config, config_data in HR_PARAMETERS_CONFIG.items():
329
+ # Using a more specific regex, anchored to the start of a line (after optional list marker)
330
+ # re.escape ensures special characters in param_name_config are treated literally.
331
+ param_score_pattern = re.compile(
332
+ r"^\s*(?:[\*\-]\s*)?" + re.escape(param_name_config.split('(')[0].strip()) + r"\s*[:\-–—]?\s*(\d+(?:\.\d+)?)\b",
333
+ re.IGNORECASE | re.MULTILINE
334
+ ) # \b for word boundary after score
335
+
336
+ match = param_score_pattern.search(full_llm_response_text)
337
+ param_score = 1.0 # Default to 1 (lowest actual score) if not found or unparseable
338
+ if match:
339
+ try:
340
+ score_text = match.group(1)
341
+ param_score = float(score_text)
342
+ param_score = max(1.0, min(5.0, param_score)) # Clamp score strictly 1-5 for HR
343
+ print(f"HR Param '{param_name_config}' - Matched text: '{score_text}', Parsed: {param_score}")
344
+ except ValueError:
345
+ print(f"HR Param '{param_name_config}' - ValueError parsing score from '{score_text}' in match '{match.group(0)}'. Defaulting to 1.0.")
346
+ param_score = 1.0
347
+ else:
348
+ print(f"HR Param '{param_name_config}' - Score pattern not found. Defaulting to 1.0 for this param.")
349
+
350
+ hr_parameter_scores_parsed_dict[param_name_config] = param_score
351
+ total_weighted_score_percentage += (param_score / 5.0) * config_data["weight_normalized"] # Use normalized weight
352
+
353
+ st.session_state["hr_parameter_scores_dict"] = hr_parameter_scores_parsed_dict # Store for table display
354
+ st.session_state["overall_score"] = round(total_weighted_score_percentage, 1)
355
+ st.session_state["percentage_score"] = round(total_weighted_score_percentage, 1)
356
+
357
+ # Construct the feedback to be displayed: Parsed scores + Qualitative from LLM
358
+ # The full_llm_response_text might still be useful if qualitative parsing is tricky
359
+ parsed_scores_display_text = "**Parsed Parameter Scores (1-5 based on AI Evaluation):**\n"
360
+ for p_name, p_score in hr_parameter_scores_parsed_dict.items():
361
+ parsed_scores_display_text += f"- {p_name}: {p_score:.1f}/5\n"
362
+
363
+ qualitative_feedback_hr_extract = "Overall qualitative feedback section not clearly identified in AI response."
364
+ qualitative_match_hr = re.search(r"\*\*Overall Qualitative Feedback:\*\*(.*)", full_llm_response_text, re.DOTALL | re.IGNORECASE)
365
+ if qualitative_match_hr:
366
+ qualitative_feedback_hr_extract = qualitative_match_hr.group(1).strip()
367
+
368
+ st.session_state["evaluation_feedback"] = f"{parsed_scores_display_text}\n\n**Overall Qualitative Feedback from AI:**\n{qualitative_feedback_hr_extract}"
369
+
370
+ except Exception as e_hr_eval:
371
+ st.error(f"Error during HR/Soft Skills evaluation processing: {e_hr_eval}")
372
+ print(f"HR EVALUATION PROCESSING TRACEBACK:\n{traceback.format_exc()}")
373
+ st.session_state["evaluation_feedback"] = f"Could not process HR skills evaluation: {e_hr_eval}"
374
+ st.session_state["overall_score"] = 0.0
375
+ st.session_state["percentage_score"] = 0.0
376
+
377
+ else: # --- NON-HR (Analytics, Finance) Evaluation Logic ---
378
+ base_assessment_criteria_qualitative_non_hr = """
379
+ For the OVERALL qualitative summary, assess responses based on:
380
+ - Conceptual Understanding (effort and relevance more than perfect accuracy for the level)
381
+ - Communication Clarity (can the core idea be understood?)
382
+ - Depth of Explanation (relative to expected level)
383
+ - Use of Examples (if any, and if appropriate for the level)
384
+ - Logical Flow (is there a basic structure or train of thought?)
385
  """
386
+ per_question_scoring_guidelines_non_hr = f"""
387
+ For EACH question and its answer, provide a score from 0 to 5 points.
388
+ The candidate is at a {level_string} level.
389
+ Consider the following when assigning the per-question score:
390
+ - Effort and relevance of the answer.
391
+ - Clarity of thought for the candidate's level.
392
+ - Basic logical structure.
393
+ - Use of examples, if any were given and appropriate.
 
394
  """
395
+ if level_string == "beginner":
396
+ level_specific_instructions_non_hr = """
397
+ You are an **extremely understanding, encouraging, and supportive** interview evaluator for a **BEGINNER/FRESHER**. Your primary goal is to **build confidence**.
398
+ **Scoring Guidelines for Beginners (0-5 points per question):**
399
+ - **5 points:** Generally correct and relevant, even if brief. Shows clear effort and basic understanding.
400
+ - **4 points:** Good attempt, relevant, shows some understanding or key terms (e.g., one/two relevant words).
401
+ - **3 points:** Tries, somewhat related, or acknowledges question with a vague thought.
402
+ - **1-2 points:** Minimal effort, mostly irrelevant, but an attempt beyond silence.
403
+ - **0 points:** Completely irrelevant, no attempt, or placeholder.
404
+ Provide VERY positive feedback.
405
+ """
406
+ elif level_string == "intermediate":
407
+ level_specific_instructions_non_hr = """Supportive evaluator for **INTERMEDIATE**. Scoring (0-5): 5=Correct/Clear; 3-4=Mostly correct; 1-2=Partial/Gaps; 0=Incorrect."""
408
+ else: # Advanced
409
+ level_specific_instructions_non_hr = """Discerning evaluator for **ADVANCED**. Scoring (0-5): 5=Accurate/Comprehensive; 3-4=Correct lacks nuance; 1-2=Inaccurate; 0=Fundamentally incorrect."""
 
 
410
 
411
+ evaluation_prompt_template_non_hr = f"""
412
+ {level_specific_instructions_non_hr}
413
+ {per_question_scoring_guidelines_non_hr}
414
+ {base_assessment_criteria_qualitative_non_hr}
415
+ **YOUR RESPONSE MUST STRICTLY FOLLOW THIS FORMAT. PROVIDE SCORES FOR EACH QUESTION.**
416
+ Output format:
417
+
418
+ **Per-Question Scores:**
419
+ Question 1 Score: [Score for Q1 out of 5]
420
+ ... (repeat for all {num_answered_questions} questions provided)
421
+
422
+ **Overall Evaluation Summary:**
423
+ - Concept Understanding: [Overall qualitative feedback here]
424
+ - Communication: [Overall qualitative feedback here]
425
+ - Depth of Explanation: [Overall qualitative feedback here]
426
+ - Examples: [Overall qualitative feedback here]
427
+ - Logical Flow: [Overall qualitative feedback here]
428
+ [Any additional overall encouraging remarks can optionally follow here]
429
+ """
430
+ candidate_responses_formatted_non_hr = "\n\n".join(
431
+ [f"Question {i+1}: {entry['question']}\nAnswer {i+1}: {str(entry.get('response', '[No response provided]'))}" for i, entry in enumerate(st.session_state["answers"])]
432
+ )
433
+ full_prompt_for_non_hr_evaluation = f"{evaluation_prompt_template_non_hr}\n\nCandidate Responses:\n{candidate_responses_formatted_non_hr}"
434
+
435
+ try:
436
+ response_content_non_hr = model.generate_content(full_prompt_for_non_hr_evaluation)
437
+ full_llm_response_text_non_hr = response_content_non_hr.text.strip()
438
+ raw_llm_feedback_non_hr = full_llm_response_text_non_hr
439
+
440
+ print("--- LLM Output for Non-HR Score Extraction ---"); print(full_llm_response_text_non_hr); print("---")
441
+
442
+ total_score_non_hr = 0.0; parsed_scores_count_non_hr = 0; per_question_scores_list_non_hr = []
443
+ score_line_pattern_non_hr = re.compile(r"Question\s*(\d+)\s*Score:\s*(\d+(?:\.\d+)?)(?:\s*/\s*5)?", re.IGNORECASE)
444
+ text_to_search_non_hr = full_llm_response_text_non_hr
445
+ scores_block_match_non_hr = re.search(r"(?i)\*\*Per-Question Scores:\*\*(.*?)(?=\*\*Overall Evaluation Summary:\*\*|\Z)", text_to_search_non_hr, re.DOTALL)
446
+
447
+ if scores_block_match_non_hr:
448
+ text_to_search_non_hr = scores_block_match_non_hr.group(1).strip()
449
+ print(f"Non-HR: Found 'Per-Question Scores' block:\n{text_to_search_non_hr}")
450
+ else:
451
+ print("Non-HR: No dedicated 'Per-Question Scores' block found; searching entire response.")
452
 
 
 
 
453
 
454
+ for match_non_hr in score_line_pattern_non_hr.finditer(text_to_search_non_hr):
455
+ q_num_text_non_hr, score_val_text_non_hr = match_non_hr.group(1), match_non_hr.group(2)
 
 
456
  try:
457
+ score_non_hr = float(score_val_text_non_hr)
458
+ score_non_hr = max(0.0, min(5.0, score_non_hr))
459
+ total_score_non_hr += score_non_hr
460
+ parsed_scores_count_non_hr += 1
461
+ per_question_scores_list_non_hr.append(f"Question {q_num_text_non_hr}: {score_non_hr:.1f}/5")
462
+ print(f"Non-HR Matched Q{q_num_text_non_hr} Score: {score_non_hr}")
 
463
  except ValueError:
464
+ print(f"Non-HR Warning: Could not parse score '{score_val_text_non_hr}' from: '{match_non_hr.group(0)}'")
465
+
466
+ if parsed_scores_count_non_hr != num_answered_questions and meaningful_answers_exist:
467
+ st.warning(f"Non-HR Score Count Mismatch: Parsed {parsed_scores_count_non_hr} scores, expected {num_answered_questions}.")
468
+ print(f"Non-HR Score Count Mismatch: Expected {num_answered_questions}, got {parsed_scores_count_non_hr}")
469
+
470
+ if parsed_scores_count_non_hr == 0 and meaningful_answers_exist:
471
+ st.warning("CRITICAL (Non-HR): No per-question scores parsed from LLM response. Total score set to 0.")
472
+ print("CRITICAL (Non-HR): No per-question scores parsed.")
473
+ total_score_non_hr = 0.0
474
+
475
+ max_score_non_hr = num_answered_questions * 5.0
476
+ st.session_state["overall_score"] = total_score_non_hr
477
+ st.session_state["percentage_score"] = (total_score_non_hr / max_score_non_hr) * 100.0 if max_score_non_hr > 0 else 0.0
478
+
479
+ final_feedback_non_hr = f"**Total Calculated Score:** {st.session_state['overall_score']:.1f} / {max_score_non_hr:.1f} ({st.session_state['percentage_score']:.1f}%)\n\n"
480
+ if per_question_scores_list_non_hr:
481
+ final_feedback_non_hr += "**Parsed Per-Question Scores:**\n" + "\n".join(per_question_scores_list_non_hr) + "\n\n"
482
+
483
+ qual_summary_match_non_hr = re.search(r"\*\*Overall Evaluation Summary:\*\*(.*)", raw_llm_feedback_non_hr, re.DOTALL | re.IGNORECASE)
484
+ if qual_summary_match_non_hr:
485
+ final_feedback_non_hr += "**Overall Qualitative Summary (from AI):**\n" + qual_summary_match_non_hr.group(1).strip()
486
+ else:
487
+ final_feedback_non_hr += "\n---\n**Full AI Response (for context if summary parsing failed):**\n" + raw_llm_feedback_non_hr
488
+ st.session_state["evaluation_feedback"] = final_feedback_non_hr.strip()
489
+
490
+ except Exception as e_non_hr_eval:
491
+ st.error(f"Error during Non-HR evaluation processing: {e_non_hr_eval}")
492
+ print(f"NON-HR EVALUATION PROCESSING TRACEBACK:\n{traceback.format_exc()}")
493
+ st.session_state["evaluation_feedback"] = f"Could not process Non-HR evaluation: {e_non_hr_eval}"
494
+ st.session_state["overall_score"] = 0.0
495
+ st.session_state["percentage_score"] = 0.0
496
+ ########################################///////////////////////////////////////////////////#########################################
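To illustrate the two score-extraction paths implemented above, a standalone sketch running the same regular expressions against sample lines an LLM might return (the sample strings are invented):

import re

# HR branch: one "<Parameter>: <score>" line per soft-skill parameter
hr_line = "- Confidence: 4"
hr_pattern = re.compile(
    r"^\s*(?:[\*\-]\s*)?" + re.escape("Confidence") + r"\s*[:\-–—]?\s*(\d+(?:\.\d+)?)\b",
    re.IGNORECASE | re.MULTILINE,
)
m = hr_pattern.search(hr_line)
print(float(m.group(1)) if m else None)  # -> 4.0 (clamped to the 1-5 range by the code above)

# Non-HR branch: "Question N Score: X" lines, optionally suffixed with "/5"
non_hr_text = "Question 1 Score: 3.5/5\nQuestion 2 Score: 4"
score_line_pattern = re.compile(r"Question\s*(\d+)\s*Score:\s*(\d+(?:\.\d+)?)(?:\s*/\s*5)?", re.IGNORECASE)
for q_num, score in score_line_pattern.findall(non_hr_text):
    print(q_num, float(score))  # -> 1 3.5 and 2 4.0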
497
  # --- Prompts for Question Generation ---
498
  BEGINNER_PROMPT = """
499
  You are a friendly mock interview trainer conducting a **Beginner-level** spoken interview in the domain of **{domain}**.
 
553
  ❌ Avoid vague or open-ended statements—each question should be concise and specific.
554
  """
555
 
556
+ ########################################///////////////////////////////////////////////////#########################################
557
  # UI styles
558
  st.markdown("""
559
  <style>
 
677
  st.sidebar.subheader("Select Interview Domain:")
678
  for domain in ["Analytics", "Finance", "Soft Skills"]:
679
  if st.sidebar.button(domain):
680
+ st.session_state.clear() # Reset entire session state
681
  st.session_state["selected_domain"] = domain
 
 
682
  st.rerun()
683
 
684
  if not st.session_state["selected_domain"]:
 
707
  input_text = st.sidebar.text_area("Paste Job Description:")
708
 
709
  elif section_choice == "Skills":
710
+ input_text = ""
711
+
712
+ if st.session_state["selected_domain"] == "Finance":
713
+ finance_levels = ["Level-1", "Level-2", "Level-3"]
714
+ selected_level = st.sidebar.selectbox("Select a Finance Level:", finance_levels, key="finance_level_select")
715
+
716
+ difficulty = st.session_state.get("difficulty", "Beginner")
717
+
718
+ if selected_level != "Level-1":
719
+ st.sidebar.warning(f"🚧 {selected_level} content is still under development. Please select Level-1 to continue.")
720
+ st.stop()
721
+
722
+ # Map difficulty level to column in Excel
723
+ column_map = {
724
+ "Beginner": "MODULE 1-EASY",
725
+ "Intermediate": "MODULE 1-MEDIUM",
726
+ "Advanced": "MODULE 1-DIFFICULT"
727
+ }
728
+
729
+ selected_column = column_map[difficulty]
730
+
731
+ # Load Excel and questions
732
+ excel_path = os.path.join("data", "CIBOP Mock Questions.xlsx")
733
+ try:
734
+ df = pd.read_excel(excel_path, engine="openpyxl")
735
+ questions_from_excel = df[selected_column].dropna().astype(str).tolist()
736
+ input_text = selected_column # Optional, for tracking
737
+ except Exception as e:
738
+ st.sidebar.error(f"❌ Error reading Excel file: {e}")
739
+ st.stop()
740
+
741
+ st.sidebar.success(f"✅ Loaded {difficulty}-level questions from {selected_level}")
742
+
743
+ else:
744
+ # For Analytics or any other domain
745
+ skills = {
746
+ "Analytics": ["Python", "SQL", "Machine Learning", "Statistics", "Business Analytics"]
747
  }
748
+ skill_list = skills.get(st.session_state["selected_domain"], [])
749
+ if skill_list:
750
+ selected_skill = st.sidebar.selectbox("Select a Skill:", skill_list, key="skill_select")
751
+ input_text = selected_skill
752
+ st.sidebar.markdown(f"✅ Selected Skill: **{selected_skill}**")
753
+
754
 
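For reference, a minimal sketch of the workbook layout the Finance branch above expects and of the sampling used in the "Generate Questions" handler below; the file name and column headers come from the code, the rows are invented:

import pandas as pd
from random import sample

# Hypothetical rows mirroring data/CIBOP Mock Questions.xlsx
df = pd.DataFrame({
    "MODULE 1-EASY": ["What is NAV?", "Define a mutual fund.", "What is trade reconciliation?"],
    "MODULE 1-MEDIUM": ["Walk through the NAV calculation cycle.", None, None],
    "MODULE 1-DIFFICULT": ["Explain swing pricing and when it is applied.", None, None],
})

selected_column = "MODULE 1-EASY"  # chosen via the Beginner/Intermediate/Advanced column_map
questions_from_excel = df[selected_column].dropna().astype(str).tolist()
num_qs = 2
print(sample(questions_from_excel, min(num_qs, len(questions_from_excel))))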
755
  if st.sidebar.button("Generate Questions"):
756
  if not input_text.strip():
757
  st.warning("⚠️ Please provide input based on the selected method.")
758
  st.stop()
759
 
760
+ if st.session_state["selected_domain"] == "Finance" and section_choice == "Skills":
761
+ st.session_state["generated_questions"] = sample(questions_from_excel, min(num_qs, len(questions_from_excel)))
762
+ else:
763
+ prompt = f"Ask {num_qs} direct and core-level {difficulty} interview questions related to {input_text}. Do not include intros or numbering."
764
+ model = genai.GenerativeModel('gemini-1.5-pro-latest')
765
+ response = model.generate_content([prompt, input_text])
766
+ lines = response.text.strip().split("\n")
767
+ questions = [q.strip("* ") for q in lines if q.strip()]
768
+ st.session_state["generated_questions"] = questions[:num_qs]
769
+
770
  st.session_state["current_question_index"] = 0
771
  st.session_state["answers"] = []
772
  st.session_state["evaluation_feedback"] = ""
 
891
  st.rerun()
892
 
893
 
894
+ # === Summary Display ===
895
+
896
  # === Summary Display ===
897
  if st.session_state.get("show_summary", False):
 
898
  st.subheader("📊 Complete Mock Interview Summary")
899
 
900
+ # Fetch values from session state, providing defaults
901
+ feedback_content_for_display = st.session_state.get('evaluation_feedback', "Evaluation details not available.")
902
+ if not isinstance(feedback_content_for_display, str):
903
+ feedback_content_for_display = str(feedback_content_for_display)
904
 
905
+ # Max score basis is the number of questions that were *generated* for the session
906
+ num_qs_in_session = len(st.session_state.get("generated_questions", []))
907
+ if num_qs_in_session == 0 and st.session_state.get("answers"): # Fallback if no generated_questions but answers exist
908
+ num_qs_in_session = len(st.session_state.answers)
909
+
910
+ max_score_possible_for_session = num_qs_in_session * 5.0
911
+ current_percentage_score = st.session_state.get('percentage_score', 0.0)
912
+ current_overall_score = st.session_state.get('overall_score', 0.0)
913
 
914
+ # Display the calculated score and percentage bar first in a card
915
  st.markdown(f"""
916
+ <div class='summary-card' style="margin-bottom: 20px;">
917
+ <h4 style="color: #212529;">✅ <strong>Overall Score:</strong> {current_overall_score:.1f} / {max_score_possible_for_session:.1f}
918
+ ({current_percentage_score:.1f}%)
919
+ </h4>
920
  <div style='margin:10px 0; position:relative;'>
921
+ <div style="background:#eee; border-radius:10px; overflow:hidden; height:30px; position:relative;">
922
  <div style="
923
+ width:{current_percentage_score}%;
924
+ background:#00c851; /* Green for progress */
925
  height:100%;
926
+ border-radius:10px 0 0 10px; /* Keep left radius for progress */
927
  transition: width 0.4s ease-in-out;
928
  "></div>
929
  <div style="
 
936
  align-items:center;
937
  justify-content:center;
938
  font-weight:bold;
939
+ color: black !important; /* Ensure text is visible on green/grey */
940
  font-size: 0.9rem;
941
+ user-select:none; /* Prevent text selection */
942
  ">
943
+ {current_percentage_score:.1f}%
944
  </div>
945
  </div>
946
  </div>
 
 
 
 
947
  </div>
948
  """, unsafe_allow_html=True)
949
+
950
+ # Display the detailed evaluation feedback text in a separate section
951
+ st.markdown("---")
952
+ st.markdown("<h4 style='color: #212529;'>Detailed Evaluation & Feedback from AI:</h4>", unsafe_allow_html=True)
953
+
954
+ # Use a styled div for the feedback text block to ensure good readability
955
+ # Replace newlines with <br> for proper HTML multiline display
956
+ html_formatted_feedback = feedback_content_for_display.replace('\n', '<br>')
957
+ st.markdown(f"""
958
+ <div style="background-color: #ffffff; color: #212529; padding: 15px; border-radius: 8px; border: 1px solid #e0e0e0; margin-top:10px; max-height: 500px; overflow-y: auto; white-space: normal; word-wrap: break-word;">
959
+ {html_formatted_feedback}
960
+ </div>
961
+ """, unsafe_allow_html=True)
962
+
963
+ st.markdown("---") # Separator
964
+
965
+ # Buttons for suggestions, download, practice
966
+ cols_summary_buttons = st.columns([1, 1, 1]) # 3 columns for the buttons
967
+
968
+ with cols_summary_buttons[0]:
969
+ if st.button("💡 Get Improvement Suggestions", key="get_suggestions_btn_final", use_container_width=True):
970
+ # Regenerate suggestions if not present or explicitly requested again
971
+ generate_improvement_suggestions() # This function should handle st.info/st.success
972
+ st.rerun() # Rerun to show the expander or updated suggestions
973
+
974
+ # Helper function to prepare summary text for download
975
+ def prepare_summary_for_download():
976
+ download_text = f"# GrillMaster Mock Interview Summary\n\n"
977
+ download_text += f"**Selected Domain:** {st.session_state.get('selected_domain', 'N/A')}\n"
978
+ dl_difficulty = st.session_state.get('difficulty_level_select', 'N/A')
979
+ download_text += f"**Difficulty Level:** {dl_difficulty}\n"
980
+
981
+ num_q_for_max_score = len(st.session_state.get("generated_questions", st.session_state.get("answers",[])))
982
+ max_s_for_dl = num_q_for_max_score * 5.0
983
+
984
+ download_text += f"**Calculated Overall Score:** {st.session_state.get('overall_score', 0.0):.1f} / {max_s_for_dl:.1f} ({st.session_state.get('percentage_score', 0.0):.1f}%)\n\n"
985
+
986
+ download_text += "## Questions & Candidate's Answers:\n"
987
+ num_answers_actually_given = len(st.session_state.get("answers", []))
988
+ for i in range(num_q_for_max_score):
989
+ question_text_dl = st.session_state.generated_questions[i] if i < len(st.session_state.generated_questions) else "Question text not found"
990
+ answer_text_dl = "[No answer recorded]"
991
+ if i < num_answers_actually_given:
992
+ answer_text_dl = str(st.session_state.answers[i].get('response', '[No response provided]'))
993
+
994
+ download_text += f"**Question {i+1}:** {question_text_dl}\n"
995
+ download_text += f"**Your Answer {i+1}:** {answer_text_dl}\n\n"
996
+
997
+ download_text += "\n## AI Evaluation Details (Includes Parsed Scores and Qualitative Feedback):\n"
998
+ # st.session_state.evaluation_feedback is now already pre-formatted
999
+ download_text += st.session_state.get('evaluation_feedback', "No AI evaluation available.")
1000
+ download_text += "\n\n"
1001
+
1002
+ if st.session_state.get("improvement_suggestions_generated", False) and st.session_state.get("improvement_suggestions"):
1003
+ download_text += "\n## Detailed Improvement Suggestions from AI:\n"
1004
+ download_text += st.session_state.get('improvement_suggestions', "No improvement suggestions were generated.")
1005
+
1006
+ return download_text.encode('utf-8')
1007
+
1008
+ with cols_summary_buttons[1]:
1009
+ summary_bytes_dl_final = prepare_summary_for_download()
1010
+ st.download_button(
1011
+ label="💾 Download Full Summary",
1012
+ data=summary_bytes_dl_final,
1013
+ file_name=f"GrillMaster_Summary_{st.session_state.get('selected_domain','General')}_{time.strftime('%Y%m%d_%H%M')}.md",
1014
+ mime="text/markdown",
1015
+ key="download_summary_final_btn",
1016
+ use_container_width=True
1017
+ )
1018
+
1019
+
1020
+
1021
+ # Expander for detailed suggestions, shown if generated
1022
+ if st.session_state.get("improvement_suggestions_generated", False) and st.session_state.get("improvement_suggestions"):
1023
+ with st.expander("🔍 View Detailed Improvement Suggestions", expanded=True): # Default to expanded once generated
1024
+ st.markdown(st.session_state.improvement_suggestions, unsafe_allow_html=True) # LLM might use markdown
1025
+
1026
+ # Conditional button for low scores
1027
+ if current_percentage_score < 50.0:
1028
+ st.warning(f"Your score is {current_percentage_score:.1f}%. Keep practicing! You can also reset all settings to try a new domain or difficulty.")
1029
+ if st.button("🔁 Practice Again & Reset All Settings", key="practice_full_reset_final_btn", use_container_width=True):
1030
+ # Clear all session state keys and re-initialize to defaults
1031
+ keys_to_fully_clear = list(st.session_state.keys())
1032
+ for key_to_del_full in keys_to_fully_clear:
1033
+ del st.session_state[key_to_del_full]
1034
+