Sa-m committed on
Commit
b50062f
·
verified ·
1 Parent(s): 32c1345

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +456 -121
app.py CHANGED
@@ -1,157 +1,492 @@
1
- import streamlit as st
2
- import PyPDF2
3
  import os
 
 
4
  import google.generativeai as genai
5
  import tensorflow as tf
6
  from transformers import BertTokenizer, TFBertModel
7
  import numpy as np
8
- import math
9
  import speech_recognition as sr
10
- import gtts
11
- from streamlit.components.v1 import html
 
12
  import time
13
-
14
-
15
  from dotenv import load_dotenv
16
- load_dotenv()
17
 
 
 
18
 
 
 
 
19
 
20
- # no wide mode
21
- st.set_page_config(page_title="Streamlit App", page_icon=":shark:", layout="centered", initial_sidebar_state="auto")
22
-
23
- st.title("Mock Interview")
24
-
25
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
26
- text_model= genai.GenerativeModel("gemini-pro")
27
 
28
- st.write("Welcome to the mock interview app. This app will help you prepare for your next interview. You can practice your responses to common interview questions and receive feedback on your responses.")
29
 
30
  def getallinfo(data):
31
- text = f"{data} is not properly formatted for this model. Please try again and format the whole in a single paragraph covering all the information."
 
 
32
  response = text_model.generate_content(text)
33
  response.resolve()
34
  return response.text
35
 
36
- def file_processing(uploaded_file):
37
- # upload pdf of resume
38
- reader = PyPDF2.PdfReader(uploaded_file)
39
- text = ""
40
- for page in reader.pages:
41
- text += page.extract_text()
42
  return text
43
 
44
-
45
- # Load the pre-trained BERT model
46
- model = TFBertModel.from_pretrained("bert-base-uncased")
47
- tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
48
-
49
- # Function to preprocess text and get embeddings
50
  def get_embedding(text):
51
- encoded_text = tokenizer(text, return_tensors="tf")
52
  output = model(encoded_text)
53
  embedding = output.last_hidden_state[:, 0, :]
54
  return embedding
55
 
56
- # Function to generate feedback (replace with your logic)
57
  def generate_feedback(question, answer):
58
- # Ensure correct variable name (case-sensitive)
59
- question_embedding = get_embedding(question)
60
- answer_embedding = get_embedding(answer)
61
-
62
- # Enable NumPy-like behavior for transpose
63
- tf.experimental.numpy.experimental_enable_numpy_behavior()
64
-
65
- # Calculate similarity score (cosine similarity)
66
- similarity_score = np.dot(question_embedding, answer_embedding.T) / (np.linalg.norm(question_embedding) * np.linalg.norm(answer_embedding))
67
-
68
- # Generate basic feedback based on similarity score
69
- corrected_string = f"Feedback: {np.array2string(similarity_score, precision=2)}"
70
- # print(corrected_string)
71
- return np.array2string(similarity_score, precision=2)
 
72
 
73
  def generate_questions(roles, data):
74
  questions = []
75
- text = f"If this is not a resume then return text uploaded pdf is not a resume. this is a resume overview of the candidate. The candidate details are in {data}. The candidate has applied for the role of {roles}. Generate questions for the candidate based on the role applied and on the Resume of the candidate. Not always necceassary to ask only technical questions related to the role. Ask some personal questions too. Ask no additional questions. Dont categorize the questions. No of questions should range from 1-3 questions only. Ask one question at a time only."
76
- response = text_model.generate_content(text)
77
- response.resolve()
78
- # slipt the response into questions either by \n or by ? or by . or by !
79
- questions = response.text.split("\n")
80
-
81
- return questions
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
  def generate_overall_feedback(data, percent, answer, questions):
85
- percent = float(percent)
86
- if percent > 0.5:
87
- test = f"Here is the overview of the candidate {data}. In the interview the questions asked were {questions}. The candidate has answered the questions as follows: {answer}. Based on the answers provided, the candidate has scored {percent}. The candidate has done well in the interview. The candidate has answered the questions well and has a good understanding of the concepts. The candidate has scored well in the interview. The candidate has scored {percent} in the interview. The candidate has done well in the interview. The candidate has answered the questions well and has a good understanding of the concepts. The candidate has scored well in the interview. The candidate has scored {percent} in the interview."
88
- else:
89
- test = f"Here is the overview of the candidate {data}. In the interview the questions asked were {questions}. The candidate has answered the questions as follows: {answer}. Based on the answers provided, the candidate has scored {percent}. tell the average percent and rate the interview out of 10. Give the feedback to the candidate about the interview and areas of improvements. While talking to candidate always take their name. give the candidate various ways to improve their interview skills. The candidate needs to know about where they are going wrong and the solution to the issues they are having during the interview."
90
- # st.write(test)
91
- response = text_model.generate_content(test)
92
- response.resolve()
93
- return response.text
 
 
 
 
 
 
 
94
 
95
- # def store_audio_text():
96
- # r = sr.Recognizer()
97
- # with sr.Microphone() as source:
98
- # st.write("Speak now")
99
- # audio = r.listen(source)
100
- # try:
101
- # text = r.recognize_google(audio)
102
- # # st.success(f"Your Answer: {text}")
103
- # return text
104
- # except:
105
- # st.write("Sorry could not recognize your voice")
106
- # return " "
107
  def store_audio_text():
108
- try:
109
- for index, name in enumerate(sr.Microphone.list_microphone_names()):
110
- print(f"Microphone with index {index}: {name}")
111
- recognizer = sr.Recognizer()
112
- with sr.Microphone(device_index=0) as source:
113
- st.write("Listening...")
114
- audio = recognizer.listen(source)
115
- text = recognizer.recognize_google(audio)
116
- st.write(f"Your Answer: {text}")
 
 
 
 
 
117
  return text
118
- except sr.RequestError as e:
119
- st.error("API unavailable or unresponsive")
120
- except sr.UnknownValueError:
121
- st.error("Unable to recognize speech")
122
- except OSError:
123
- st.error("Microphone not available or not accessible in this environment")
124
-
125
- uploaded_file = st.file_uploader("Upload your resume in simple Document Format", type=["pdf"])
126
- roles_applied = []
127
- if uploaded_file is not None:
128
- st.write("File uploaded successfully!")
129
- data = file_processing(uploaded_file)
130
- # st.write(data)
131
- # st.write(getallinfo(data))
132
- updated_data = getallinfo(data)
133
- # st.write(updated_data)
134
- roles = st.multiselect("Select your job role:", ["Data Scientist", "Software Engineer", "Product Manager", "Data Analyst", "Business Analyst"])
135
- if roles:
136
- roles_applied.append(roles)
137
- st.write(f"Selected roles: {roles}")
138
- questions = generate_questions(roles, updated_data)
139
- feedback = []
140
- answers = []
141
- ans = ""
142
- interaction = {}
143
- for i in range(len(questions)):
144
- st.write(questions[i])
145
- ans = store_audio_text()
146
- st.success(ans)
147
- answers.append(ans)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  percent = 0.0
149
- percent = generate_feedback(questions[i], answers[i])
150
- print(percent)
151
- feedback.append(generate_overall_feedback(data, percent, answers[i], questions[i]))
152
- interaction[questions[i]] = answers[i]
153
- if st.button("Submit"):
154
- for i in range(len(questions)):
155
- st.write(interaction[questions[i]])
156
- st.write(feedback[i])
157
- # st.write("Thank you for your responses!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PrepGenie/app.py
2
+ import gradio as gr
3
  import os
4
+ import tempfile
5
+ import PyPDF2
6
  import google.generativeai as genai
7
  import tensorflow as tf
8
  from transformers import BertTokenizer, TFBertModel
9
  import numpy as np
 
10
  import speech_recognition as sr
11
+ from gtts import gTTS
12
+ import pygame
13
+ import io
14
  import time
 
 
15
  from dotenv import load_dotenv
 
16
 
17
+ # Load environment variables
18
+ load_dotenv()
19
 
20
+ # Configure Generative AI
21
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) # Use environment variable or set a default
22
+ text_model = genai.GenerativeModel("gemini-2.5-flash")
23
 
24
+ # Load BERT model and tokenizer
25
+ model = TFBertModel.from_pretrained("bert-base-uncased")
26
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
 
 
 
 
27
 
28
+ # --- Helper Functions (Logic from Streamlit) ---
29
 
30
def getallinfo(data):
    """Condense raw resume text into a single-paragraph summary via the LLM.

    The prompt instructs the model to reply "not a resume" when the input
    lacks resume-like details (name, experience, education, skills).

    Args:
        data: Raw text extracted from the uploaded PDF.

    Returns:
        The model's response text.
    """
    prompt = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
education, skills of the user like in a resume. If the details are not provided return: not a resume.
If details are provided then please try again and format the whole in a single paragraph covering all the information. """
    reply = text_model.generate_content(prompt)
    reply.resolve()
    return reply.text
37
 
38
def file_processing(pdf_file_path):
    """Extract plain text from a PDF file on disk.

    Args:
        pdf_file_path: Filesystem path to the PDF.

    Returns:
        Concatenated text of all pages. Pages with no extractable text
        contribute nothing instead of raising.
    """
    text = ""
    with open(pdf_file_path, "rb") as f:
        reader = PyPDF2.PdfReader(f)
        for page in reader.pages:
            # BUG FIX: extract_text() can return None (e.g. image-only
            # pages); "or ''" prevents a TypeError on concatenation.
            text += page.extract_text() or ""
    return text
45
 
 
 
 
 
 
 
46
def get_embedding(text):
    """Embed *text* as BERT's [CLS] hidden state, shape (1, hidden_size)."""
    tokens = tokenizer(text, return_tensors="tf", truncation=True, padding=True)
    hidden_states = model(tokens).last_hidden_state
    # Position 0 is the [CLS] token, used here as the sentence embedding.
    return hidden_states[:, 0, :]
51
 
 
52
def generate_feedback(question, answer):
    """Score how related *answer* is to *question* via cosine similarity.

    Both strings are embedded with BERT ([CLS] vectors) and compared.

    Returns:
        The similarity formatted as a 2-decimal string (e.g. "0.87"),
        or "0.00" on any failure or degenerate (zero-norm) input.
    """
    try:
        question_embedding = get_embedding(question)
        answer_embedding = get_embedding(answer)
        # Allows numpy-style ops (e.g. .T) on TF tensors below.
        tf.experimental.numpy.experimental_enable_numpy_behavior()
        dot_product = np.dot(question_embedding, answer_embedding.T)
        norms = np.linalg.norm(question_embedding) * np.linalg.norm(answer_embedding)
        if norms == 0:
            # BUG FIX: the old code assigned a scalar 0.0 here and then
            # indexed it with [0][0], which always raised and silently
            # fell into the except branch. Return the score directly.
            return "0.00"
        similarity_score = dot_product / norms
        return f"{similarity_score[0][0]:.2f}"  # (1,1) matrix -> scalar string
    except Exception as e:
        print(f"Error generating feedback: {e}")
        return "0.00"
68
 
69
def generate_questions(roles, data):
    """Generate up to two interview questions from the resume summary.

    Args:
        roles: Selected job role(s) — a list from the multiselect, or a str.
        data: Processed resume summary (output of getallinfo).

    Returns:
        A list of 1-2 question strings; a generic opener on any failure.
    """
    # roles may arrive as a list (dropdown multiselect) or a single value.
    roles_str = ", ".join(roles) if isinstance(roles, list) else str(roles)

    text = f"""If this is not a resume then return text uploaded pdf is not a resume. this is a resume overview of the candidate.
The candidate details are in {data}. The candidate has applied for the role of {roles_str}.
Generate questions for the candidate based on the role applied and on the Resume of the candidate.
Not always necceassary to ask only technical questions related to the role but the logic of question
should include the job applied for because there might be some deep tech questions which the user might not know.
Ask some personal questions too.Ask no additional questions. Dont categorize the questions.
ask 2 questions only. directly ask the questions not anything else.
Also ask the questions in a polite way. Ask the questions in a way that the candidate can understand the question.
and make sure the questions are related to these metrics: Communication skills, Teamwork and collaboration,
Problem-solving and critical thinking, Time management and organization, Adaptability and resilience. dont
tell anything else just give me the questions. if there is a limit in no of questions, ask or try questions that covers
all need."""

    fallback = ["Could you please introduce yourself based on your resume?"]
    try:
        response = text_model.generate_content(text)
        response.resolve()
        # One question per non-blank line. NOTE: the old '?'/'.' fallback
        # splits were unreachable (a non-empty text always yields at least
        # one newline-split element) and would have stripped punctuation,
        # so they were removed.
        questions = [q.strip() for q in response.text.strip().split('\n') if q.strip()]
        # Cap at two questions; use the generic opener if nothing parsed.
        return questions[:2] if questions else fallback
    except Exception as e:
        print(f"Error generating questions: {e}")
        return fallback
105
 
106
def generate_overall_feedback(data, percent, answer, questions):
    """Ask the LLM for short, structured interview feedback.

    Args:
        data: Processed resume summary of the candidate.
        percent: Similarity score string from generate_feedback().
        answer: The candidate's answer text.
        questions: The question(s) that were asked.

    Returns:
        The model's feedback text, or a fixed message on failure.
    """
    prompt = f"""As an interviewer, provide concise feedback (max 150 words) for candidate {data}.
Questions asked: {questions}
Candidate's answers: {answer}
Score: {percent}
Feedback should include:
1. Overall performance assessment (2-3 sentences)
2. Key strengths (2-3 points)
3. Areas for improvement (2-3 points)
Be honest and constructive. Do not mention the exact score, but rate the candidate out of 10 based on their answers."""
    try:
        reply = text_model.generate_content(prompt)
        reply.resolve()
        return reply.text
    except Exception as err:
        print(f"Error generating overall feedback: {err}")
        return "Feedback could not be generated."
123
 
 
 
 
 
 
 
 
 
 
 
 
 
124
def store_audio_text():
    """Record one answer from the default microphone and transcribe it.

    Uses Google's free speech-recognition endpoint via the
    SpeechRecognition package.

    Returns:
        The recognized text, or a single space " " on any failure
        (timeout, network error, unintelligible audio, other errors).
    """
    r = sr.Recognizer()
    r.energy_threshold = 300           # initial volume floor for speech detection
    r.dynamic_energy_threshold = True  # let the recognizer adapt the floor over time
    r.pause_threshold = 3              # seconds of silence that end a phrase
    with sr.Microphone() as source:
        print("Adjusting for ambient noise...")
        r.adjust_for_ambient_noise(source, duration=1)
        print("Speak now... (You have 200 seconds)")
        try:
            # timeout=380: wait up to 380 s for speech to *start*;
            # phrase_time_limit=200: cap a single phrase at 200 s.
            audio = r.listen(source, timeout=380, phrase_time_limit=200)
            print("Processing audio...")
            text = r.recognize_google(audio)
            print(f"Recognized text: {text}")
            return text
        except sr.WaitTimeoutError:
            print("Listening timed out.")
            return " "
        except sr.RequestError as e:
            print(f"Could not request results from Google Speech Recognition service; {e}")
            return " "
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
            return " "
        except Exception as e:
            # Catch-all so a mic/driver hiccup never crashes the app;
            # callers treat " " as "no usable answer".
            print(f"An error occurred during speech recognition: {e}")
            return " "
152
+
153
# Skills every metrics dict must contain, in display order.
_EXPECTED_METRICS = (
    "Communication skills",
    "Teamwork and collaboration",
    "Problem-solving and critical thinking",
    "Time management and organization",
    "Adaptability and resilience",
)


def _parse_metrics_text(metrics_text):
    """Parse 'Skill: rating' lines into a {skill: float} dict.

    Unparseable ratings become 0.0; every expected skill is guaranteed
    a key in the result.
    """
    metrics = {}
    for line in metrics_text.split('\n'):
        if ':' not in line:
            continue
        key, value_str = line.split(':', 1)
        # ROBUSTNESS FIX: LLM output often carries markdown decoration
        # ("**Communication skills:** 8", "- Teamwork ...", "[8]").
        # Previously such lines created a wrong key and left the real
        # metric at 0; strip the decoration before matching.
        key = key.strip().strip('*-• ').strip()
        value_str = value_str.strip().strip('*[] ')
        try:
            metrics[key] = float(value_str)
        except ValueError:
            metrics[key] = 0.0
    for name in _EXPECTED_METRICS:
        metrics.setdefault(name, 0.0)
    return metrics


def generate_metrics(data, answer, question):
    """Rate one answer on five soft-skill metrics via the LLM.

    Args:
        data: Processed resume summary of the candidate.
        answer: The candidate's answer text.
        question: The question that was asked.

    Returns:
        Dict mapping each metric name to a 0-10 float; all zeros on error.
    """
    text = f"""Here is the overview of the candidate {data}. In the interview the question asked was {question}.
The candidate has answered the question as follows: {answer}. Based on the answers provided, give me the metrics related to:
Communication skills, Teamwork and collaboration, Problem-solving and critical thinking, Time management and organization,
Adaptability and resilience.
Rules for rating:
- Rate each skill from 0 to 10
- If the answer is empty, 'Sorry could not recognize your voice', meaningless, or irrelevant: rate all skills as 0
- Only provide numeric ratings without any additional text or '/10'
- Ratings must reflect actual content quality - do not give courtesy points
- Consider answer relevance to the specific skill being rated
Format:
Communication skills: [rating]
Teamwork and collaboration: [rating]
Problem-solving and critical thinking: [rating]
Time management and organization: [rating]
Adaptability and resilience: [rating]"""
    try:
        response = text_model.generate_content(text)
        response.resolve()
        return _parse_metrics_text(response.text.strip())
    except Exception as e:
        print(f"Error generating metrics: {e}")
        # Default to all-zero metrics on any failure.
        return {name: 0.0 for name in _EXPECTED_METRICS}
205
+
206
+ # --- Gradio UI Components and Logic ---
207
+
208
def process_resume(file_obj):
    """Handle resume upload: extract the PDF text and summarize it.

    Returns exactly 13 values matching process_btn.click's outputs:
    status message, 11 visibility updates (role dropdown, start button,
    and the nine interview widgets), and the processed resume text.

    BUG FIXES vs. the previous version:
    - The success path returned 15 values and the no-file path 14, against
      13 wired outputs (Gradio arity error).
    - gr.File supplies an object whose .name is a path on disk (or a plain
      path string); the old code called .read() on it and copied the bytes
      to a second temp file unnecessarily.
    - The "Start Interview" button was never made visible.
    """
    hidden = [gr.update(visible=False)] * 11
    if not file_obj:
        return ("Please upload a PDF resume.", *hidden, None)

    try:
        # Read the uploaded PDF in place — no temp copy needed.
        file_path = file_obj if isinstance(file_obj, str) else file_obj.name
        raw_text = file_processing(file_path)
        processed_data = getallinfo(raw_text)

        return (
            "File processed successfully!",
            gr.update(visible=True),             # role_selection dropdown
            gr.update(visible=True),             # start_interview_btn
            *([gr.update(visible=False)] * 9),   # interview widgets stay hidden
            processed_data,                      # cached resume summary
        )
    except Exception as e:
        return (f"Error processing file: {str(e)}", *hidden, None)
247
+
248
def start_interview(roles, processed_resume_data):
    """Starts the interview process.

    Args:
        roles: Selected job role(s) from the dropdown (list of str, or str).
        processed_resume_data: Resume summary produced by getallinfo().

    Returns:
        A 16-tuple consumed positionally by the Gradio event wiring:
        status message, first question, questions list, empty answers list,
        empty interactions dict, empty metrics dict, nine gr.update
        visibility toggles, and the fresh interview-state dict (None on
        error). All three return paths are 16 elements long.
    """
    if not roles or not processed_resume_data:
        # Guard: nothing to do without both a role and processed resume text.
        return "Please select a role and ensure resume is processed.", "", [], [], {}, {}, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)

    try:
        questions = generate_questions(roles, processed_resume_data)
        initial_question = questions[0] if questions else "Could you please introduce yourself?"

        # Single dict tracking the whole interview; threaded through the
        # other callbacks via gr.State.
        interview_state = {
            "questions": questions,
            "current_q_index": 0,   # index of the question awaiting an answer
            "answers": [],
            "feedback": [],
            "interactions": {},     # "Qn: ..." -> "An: ..." transcript
            "metrics_list": [],     # one metrics dict per answered question
            "resume_data": processed_resume_data
        }

        return (
            "Interview started. Please answer the first question.",
            initial_question,
            questions,
            [],  # answers
            {},  # interactions
            {},  # metrics (initially empty)
            gr.update(visible=True),   # Audio input
            gr.update(visible=True),   # Submit Answer button
            gr.update(visible=True),   # Next Question button
            gr.update(visible=False),  # Submit Interview button (hidden initially)
            gr.update(visible=False),  # Feedback textbox
            gr.update(visible=False),  # Metrics display
            gr.update(visible=False),  # Evaluation button (hidden initially)
            gr.update(visible=True),   # Question display
            gr.update(visible=True),   # Answer instructions
            interview_state
        )
    except Exception as e:
        # NOTE(review): ends with None where the success path returns the
        # state dict; downstream callbacks must tolerate a None state.
        return f"Error starting interview: {str(e)}", "", [], [], {}, {}, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), None
288
+
289
def submit_answer(audio, interview_state):
    """Transcribe a recorded answer, score it, and advance the interview.

    Args:
        audio: (sample_rate, numpy_array) tuple from gr.Audio(type="numpy").
        interview_state: Dict created by start_interview().

    Returns:
        A 13-tuple consumed positionally by the event wiring: status,
        answer text, updated state, and ten gr.update values.

    BUG FIXES vs. the previous version:
    - audio[1].save(path) was called, but numpy arrays have no .save()
      method, so every submission raised; a real PCM WAV is written now.
    - The no-audio guard returned 12 values against 13 wired outputs.
    - A dead float() conversion of the score string was removed.
    """
    if not audio or not interview_state:
        return (
            "No audio recorded or interview not started.",
            "",
            interview_state,
            gr.update(visible=False),  # feedback textbox
            gr.update(visible=False),  # feedback value slot
            gr.update(visible=False),  # metrics display
            gr.update(visible=False),  # metrics value slot
            gr.update(visible=True),   # keep audio input usable
            gr.update(visible=True),   # keep submit-answer button
            gr.update(visible=True),   # keep next-question button
            gr.update(visible=False),  # submit-interview stays hidden
            gr.update(visible=True),   # question display
            gr.update(visible=True),   # answer instructions
        )

    try:
        import wave  # stdlib; local import keeps module top level unchanged

        sample_rate, samples = audio  # gr.Audio(type="numpy") contract

        temp_dir = tempfile.mkdtemp()
        audio_file_path = os.path.join(temp_dir, "recorded_audio.wav")

        # Convert to 16-bit PCM: Gradio may deliver float samples in [-1, 1].
        if np.issubdtype(samples.dtype, np.floating):
            samples = (np.clip(samples, -1.0, 1.0) * 32767.0).astype(np.int16)
        elif samples.dtype != np.int16:
            samples = samples.astype(np.int16)
        n_channels = 1 if samples.ndim == 1 else samples.shape[1]
        with wave.open(audio_file_path, "wb") as wav_file:
            wav_file.setnchannels(n_channels)
            wav_file.setsampwidth(2)  # int16 -> 2 bytes per sample
            wav_file.setframerate(int(sample_rate))
            wav_file.writeframes(samples.tobytes())

        # Convert the audio file to text via Google speech recognition.
        r = sr.Recognizer()
        with sr.AudioFile(audio_file_path) as source:
            audio_data = r.record(source)
        answer_text = r.recognize_google(audio_data)
        print(f"Recognized Answer: {answer_text}")

        # Clean up the temporary audio file.
        os.remove(audio_file_path)
        os.rmdir(temp_dir)

        # Record the answer against the current question.
        interview_state["answers"].append(answer_text)
        current_q_index = interview_state["current_q_index"]
        current_question = interview_state["questions"][current_q_index]
        interview_state["interactions"][f"Q{current_q_index + 1}: {current_question}"] = f"A{current_q_index + 1}: {answer_text}"

        # Score the answer and generate feedback + per-skill metrics.
        percent_str = generate_feedback(current_question, answer_text)
        feedback_text = generate_overall_feedback(interview_state["resume_data"], percent_str, answer_text, current_question)
        interview_state["feedback"].append(feedback_text)

        metrics = generate_metrics(interview_state["resume_data"], answer_text, current_question)
        interview_state["metrics_list"].append(metrics)

        # Advance to the next question.
        interview_state["current_q_index"] += 1

        return (
            f"Answer submitted: {answer_text}",
            answer_text,
            interview_state,
            gr.update(visible=True),                       # show feedback textbox
            gr.update(value=feedback_text, visible=True),  # feedback content
            gr.update(visible=True),                       # show metrics display
            gr.update(value=metrics, visible=True),        # metrics content
            gr.update(visible=True),   # keep audio input for next question
            gr.update(visible=True),   # keep submit-answer button
            gr.update(visible=True),   # keep next-question button
            gr.update(visible=False),  # submit-interview still hidden
            gr.update(visible=True),   # question display
            gr.update(visible=True),   # answer instructions
        )

    except Exception as e:
        print(f"Error processing audio answer: {e}")
        return "Error processing audio. Please try again.", "", interview_state, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
352
+
353
def next_question(interview_state):
    """Moves to the next question or ends the interview.

    Args:
        interview_state: Dict created by start_interview(); its
            "current_q_index" was already advanced by submit_answer, so
            it points at the next unanswered question (or past the end).

    Returns:
        A 13-tuple consumed positionally by the event wiring: status,
        question text, state, eight visibility updates, and cleared
        answer/metrics display values. All paths are 13 elements long.
    """
    if not interview_state:
        return "Interview not started.", "", interview_state, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)

    current_q_index = interview_state["current_q_index"]
    total_questions = len(interview_state["questions"])

    if current_q_index < total_questions:
        # More questions remain: surface the next one and reset the
        # per-question widgets.
        next_q = interview_state["questions"][current_q_index]
        return (
            f"Question {current_q_index + 1}/{total_questions}",
            next_q,
            interview_state,
            gr.update(visible=True),   # Audio input
            gr.update(visible=True),   # Submit Answer
            gr.update(visible=True),   # Next Question
            gr.update(visible=False),  # Feedback textbox (hidden for new question)
            gr.update(visible=False),  # Metrics display (hidden for new question)
            gr.update(visible=False),  # Submit Interview (still hidden)
            gr.update(visible=True),   # Question display
            gr.update(visible=True),   # Answer instructions
            "",  # Clear previous answer display
            {}   # Clear previous metrics display
        )
    else:
        # Interview finished: hide the answering widgets and expose the
        # final Submit Interview button.
        return (
            "Interview completed! Click 'Submit Interview' to see your evaluation.",
            "Interview Finished",
            interview_state,
            gr.update(visible=False),  # Hide audio input
            gr.update(visible=False),  # Hide submit answer
            gr.update(visible=False),  # Hide next question
            gr.update(visible=False),  # Hide feedback textbox
            gr.update(visible=False),  # Hide metrics display
            gr.update(visible=True),   # Show submit interview button
            gr.update(visible=True),   # Question display (shows finished)
            gr.update(visible=False),  # Hide answer instructions
            "",  # Clear answer display
            {}   # Clear metrics display
        )
395
+
396
def submit_interview(interview_state):
    """Handle final submission of the interview.

    Placeholder hook: a real evaluation step would be triggered here (or
    the user redirected to an evaluation view).

    Args:
        interview_state: Dict accumulated across the interview callbacks.

    Returns:
        (status message, unmodified interview_state).
    """
    if not interview_state:
        return "Interview state is missing.", interview_state
    print("Interview submitted for evaluation.")
    print("Final State:", interview_state)
    return "Interview submitted successfully!", interview_state
409
+
410
+ # --- Gradio Interface ---
411
+
412
# --- Gradio Interface ---

with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
    gr.Markdown("# 🦈 PrepGenie - Mock Interview")
    gr.Markdown("Prepare for your next interview with AI-powered feedback.")

    # State to hold interview data.
    interview_state = gr.State({})
    # BUG FIX: the previous wiring subscripted the State component itself
    # (interview_state["questions"], ...), which raises a TypeError while
    # the Blocks graph is being built — the app never started. Dedicated
    # State holders absorb the list/dict values that start_interview
    # returns positionally.
    questions_state = gr.State([])
    answers_state = gr.State([])
    interactions_state = gr.State({})
    metrics_state = gr.State([])

    # File Upload Section
    with gr.Row():
        with gr.Column():
            file_upload = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
            process_btn = gr.Button("Process Resume")
        with gr.Column():
            file_status = gr.Textbox(label="Status", interactive=False)

    # Role Selection (initially hidden)
    role_selection = gr.Dropdown(
        choices=["Data Scientist", "Software Engineer", "Product Manager", "Data Analyst", "Business Analyst"],
        multiselect=True,
        label="Select Job Role(s)",
        visible=False
    )
    start_interview_btn = gr.Button("Start Interview", visible=False)

    # Interview Section (initially hidden)
    question_display = gr.Textbox(label="Question", interactive=False, visible=False)
    answer_instructions = gr.Markdown("Click 'Record Answer' and speak your response.", visible=False)
    audio_input = gr.Audio(label="Record Answer", type="numpy", visible=False)
    submit_answer_btn = gr.Button("Submit Answer", visible=False)
    next_question_btn = gr.Button("Next Question", visible=False)
    submit_interview_btn = gr.Button("Submit Interview", visible=False, variant="primary")

    # Feedback and Metrics (initially hidden)
    answer_display = gr.Textbox(label="Your Answer", interactive=False, visible=False)
    feedback_display = gr.Textbox(label="Feedback", interactive=False, visible=False)
    metrics_display = gr.JSON(label="Metrics", visible=False)

    # Hidden textbox to hold processed resume data between callbacks.
    processed_resume_data = gr.Textbox(visible=False)

    # --- Event Listeners ---
    # BUG FIX: several handlers previously listed the same component more
    # than once in their outputs (e.g. question_display for both its text
    # and its visibility update), which modern Gradio rejects. Sink states
    # absorb the redundant positional slots so each component appears at
    # most once per listener.
    # NOTE(review): because the question's visibility update is sunk, the
    # question textbox relies on its own visibility handling — confirm the
    # desired show/hide behavior against the handlers (TODO).
    state_sink = gr.State(None)
    visibility_sink = gr.State(None)
    answer_sink = gr.State(None)
    metrics_sink = gr.State(None)

    # Process Resume
    process_btn.click(
        fn=process_resume,
        inputs=[file_upload],
        outputs=[file_status, role_selection, start_interview_btn, question_display, answer_instructions, audio_input, submit_answer_btn, next_question_btn, submit_interview_btn, answer_display, feedback_display, metrics_display, processed_resume_data]
    )

    # Start Interview — start_interview returns 16 positional values.
    start_interview_btn.click(
        fn=start_interview,
        inputs=[role_selection, processed_resume_data],
        outputs=[file_status, question_display, questions_state, answers_state, interactions_state, metrics_state, audio_input, submit_answer_btn, next_question_btn, submit_interview_btn, feedback_display, metrics_display, state_sink, visibility_sink, answer_instructions, interview_state]
    )

    # Submit Answer — 13 positional values; the value-carrying updates
    # (feedback/metrics content) also set visible=True, so sinking the
    # separate visibility slots loses nothing.
    submit_answer_btn.click(
        fn=submit_answer,
        inputs=[audio_input, interview_state],
        outputs=[file_status, answer_display, interview_state, answer_sink, feedback_display, metrics_sink, metrics_display, audio_input, submit_answer_btn, next_question_btn, submit_interview_btn, question_display, answer_instructions]
    )

    # Next Question — 13 positional values.
    next_question_btn.click(
        fn=next_question,
        inputs=[interview_state],
        outputs=[file_status, question_display, interview_state, audio_input, submit_answer_btn, next_question_btn, feedback_display, metrics_display, submit_interview_btn, visibility_sink, answer_instructions, answer_display, metrics_sink]
    )

    # Submit Interview (placeholder for the evaluation trigger).
    submit_interview_btn.click(
        fn=submit_interview,
        inputs=[interview_state],
        outputs=[file_status, interview_state]
        # In a full app, you might navigate to an evaluation page here.
    )

# Run the app
if __name__ == "__main__":
    demo.launch()  # add server_name="0.0.0.0", server_port=7860 for external access