KittyMona committed on
Commit
d69dd0a
·
verified ·
1 Parent(s): 5936d24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -28
app.py CHANGED
@@ -1,4 +1,5 @@
1
- # imports
 
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
  import re
@@ -6,7 +7,7 @@ import random
6
  import whisper
7
  from pydub import AudioSegment
8
 
9
- # uploading and cleaning the knowledge txt file
10
  def load_questions(file_path):
11
  with open(file_path, 'r') as f:
12
  data = f.read()
@@ -16,12 +17,13 @@ def load_questions(file_path):
16
  for block in question_blocks:
17
  parts = block.split('Possible Answers:')
18
  question_text = parts[0].strip()
19
- questions.append({'question': question_text})
 
 
20
  return questions
21
 
22
  all_questions = load_questions('knowledge.txt')
23
 
24
- # creating the questions based on each interview
25
  questions_by_type = {
26
  'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
27
  'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
@@ -31,36 +33,31 @@ questions_by_type = {
31
  'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
32
  "debugging", "learning", "deadlines", "teamwork", "leadership", "mistake", "conflict", "decision"])],
33
  'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
34
- "testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"])]
35
  }
36
 
37
- # models
38
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
39
  whisper_model = whisper.load_model("base")
40
 
41
- # whisper audio-to-text function
42
  def transcribe_audio(file_path):
43
  try:
44
- print(f"πŸ“‚ Processing audio: {file_path}")
45
  audio = AudioSegment.from_file(file_path)
46
  converted_path = "converted.wav"
47
  audio.export(converted_path, format="wav")
48
  result = whisper_model.transcribe(converted_path, fp16=False)
49
  return result["text"]
50
  except Exception as e:
51
- return f"❌ ERROR: {str(e)}"
52
 
53
- # step 1
54
  def set_type(choice, user_profile):
55
  user_profile["interview_type"] = choice
56
  return "Great! What’s your background and what field/role are you aiming for?", user_profile
57
 
58
- # step 2
59
  def save_background(info, user_profile):
60
  user_profile["field"] = info
61
  return "Awesome! Type 'start' below to begin your interview.", user_profile
62
 
63
- # updated step 3
64
  def respond(message, chat_history, user_profile):
65
  message_lower = message.strip().lower()
66
 
@@ -112,7 +109,6 @@ def respond(message, chat_history, user_profile):
112
  chat_history.append((message, feedback))
113
  return chat_history
114
 
115
- # chatbot fallback
116
  messages = [{"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in {user_profile['field']}."}]
117
  for q, a in chat_history:
118
  messages.append({"role": "user", "content": q})
@@ -124,27 +120,33 @@ def respond(message, chat_history, user_profile):
124
  chat_history.append((message, bot_msg))
125
  return chat_history
126
 
127
- # updated feedback using model
128
  def generate_feedback(user_profile):
 
129
  questions = user_profile.get('questions', [])
130
  answers = user_profile.get('user_answers', [])
131
- feedback = []
132
 
133
- for i, (question, user_answer) in enumerate(zip(questions, answers)):
134
- prompt = (
135
- f"You are an expert interviewer. Please evaluate the following response:\n\n"
136
- f"Question: {question['question']}\n"
137
- f"Candidate's Answer: {user_answer}\n\n"
138
- f"Give a brief, constructive evaluation. Say whether it's a strong, okay, or weak answer and what could be improved."
139
- )
140
 
141
- response = client.text_generation(prompt, max_new_tokens=200)
142
- evaluation = response.strip()
143
- feedback.append(f"Question {i+1}:\n{evaluation}\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
144
 
145
  return "\n".join(feedback)
146
 
147
- # handle audio
148
  def handle_audio(audio_file, chat_history, user_profile):
149
  transcribed = transcribe_audio(audio_file)
150
  if transcribed.startswith("❌"):
@@ -152,7 +154,6 @@ def handle_audio(audio_file, chat_history, user_profile):
152
  return chat_history
153
  return respond(transcribed, chat_history, user_profile)
154
 
155
- # UI
156
  with gr.Blocks() as demo:
157
  user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
158
  chat_history = gr.State([])
@@ -194,4 +195,4 @@ with gr.Blocks() as demo:
194
  send_btn.click(lambda: "", None, msg, queue=False)
195
  audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot], queue=False)
196
 
197
- demo.launch()
 
1
+ # Updated full code with safe feedback handling
2
+
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
  import re
 
7
  import whisper
8
  from pydub import AudioSegment
9
 
10
+ # Load and parse the knowledge file
11
  def load_questions(file_path):
12
  with open(file_path, 'r') as f:
13
  data = f.read()
 
17
  for block in question_blocks:
18
  parts = block.split('Possible Answers:')
19
  question_text = parts[0].strip()
20
+ answers_text = parts[1].strip()
21
+ possible_answers = [ans.strip() for ans in re.split(r'\d+\.\s+', answers_text) if ans.strip()]
22
+ questions.append({'question': question_text, 'answers': possible_answers})
23
  return questions
24
 
25
  all_questions = load_questions('knowledge.txt')
26
 
 
27
  questions_by_type = {
28
  'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
29
  'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
 
33
  'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
34
  "debugging", "learning", "deadlines", "teamwork", "leadership", "mistake", "conflict", "decision"])],
35
  'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
36
+ "testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"])]
37
  }
38
 
 
39
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
40
  whisper_model = whisper.load_model("base")
41
 
 
42
def transcribe_audio(file_path):
    """Convert an uploaded audio file to WAV and transcribe it with Whisper.

    Args:
        file_path: Path to the audio file (any format pydub/ffmpeg can read).

    Returns:
        The transcribed text on success, or a string starting with
        "\u274C ERROR:" on failure — callers (handle_audio) check this
        prefix rather than catching exceptions.
    """
    import os
    import tempfile

    converted_path = None
    try:
        print(f"\U0001F4C2 Processing audio: {file_path}")
        audio = AudioSegment.from_file(file_path)
        # BUG FIX: the original wrote to a fixed "converted.wav", so two
        # concurrent requests would overwrite each other's audio, and the
        # file was never removed. Use a unique temp file and clean it up.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            converted_path = tmp.name
        audio.export(converted_path, format="wav")
        result = whisper_model.transcribe(converted_path, fp16=False)
        return result["text"]
    except Exception as e:
        # Surface the failure as a sentinel string; handle_audio checks
        # for the leading cross-mark prefix.
        return f"\u274C ERROR: {str(e)}"
    finally:
        # Always remove the temporary WAV, success or failure.
        if converted_path and os.path.exists(converted_path):
            os.remove(converted_path)
52
 
 
53
def set_type(choice, user_profile):
    """Record the chosen interview type and ask for the candidate's background.

    Returns a (prompt, user_profile) pair so the Gradio callback can update
    both the visible message and the session state.
    """
    user_profile.update(interview_type=choice)
    next_prompt = "Great! What’s your background and what field/role are you aiming for?"
    return next_prompt, user_profile
56
 
 
57
def save_background(info, user_profile):
    """Store the candidate's background/field and tell them how to begin.

    Returns a (message, user_profile) pair for the Gradio callback.
    """
    user_profile.update(field=info)
    start_hint = "Awesome! Type 'start' below to begin your interview."
    return start_hint, user_profile
60
 
 
61
  def respond(message, chat_history, user_profile):
62
  message_lower = message.strip().lower()
63
 
 
109
  chat_history.append((message, feedback))
110
  return chat_history
111
 
 
112
  messages = [{"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in {user_profile['field']}."}]
113
  for q, a in chat_history:
114
  messages.append({"role": "user", "content": q})
 
120
  chat_history.append((message, bot_msg))
121
  return chat_history
122
 
 
123
def generate_feedback(user_profile):
    """Score each recorded answer against its question's expected answers.

    Args:
        user_profile: Session dict; reads 'questions' (list of dicts with
            'question' and 'answers' keys) and 'user_answers' (list of str).

    Returns:
        A newline-joined feedback string, one line per answer, or a single
        warning string when no questions/answers have been recorded yet.
    """
    feedback = []
    questions = user_profile.get('questions', [])
    answers = user_profile.get('user_answers', [])

    if not questions or not answers:
        return "⚠️ Feedback unavailable: Make sure you've completed the interview first."

    for i, user_ans in enumerate(answers):
        if i >= len(questions):
            # More answers than questions recorded — flag instead of crashing.
            feedback.append(f"⚠️ No matching question for answer {i+1}.")
            continue

        correct_answers = questions[i].get('answers', [])
        if not correct_answers:
            feedback.append(f"❌ No expected answers listed for question {i+1}.")
            continue

        # Case-insensitive substring match against any of the expected answers.
        match = any(ans.lower() in user_ans.lower() for ans in correct_answers)
        if match:
            # BUG FIX: the original string contained the mojibake "βœ…"
            # (a mis-encoded check mark); emit the intended "✅".
            fb = f"Question {i+1}: ✅ Good job!"
        else:
            fb = f"Question {i+1}: ❌ Missed some key points: {correct_answers[0]}"
        feedback.append(fb)

    return "\n".join(feedback)
149
 
 
150
  def handle_audio(audio_file, chat_history, user_profile):
151
  transcribed = transcribe_audio(audio_file)
152
  if transcribed.startswith("❌"):
 
154
  return chat_history
155
  return respond(transcribed, chat_history, user_profile)
156
 
 
157
  with gr.Blocks() as demo:
158
  user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
159
  chat_history = gr.State([])
 
195
  send_btn.click(lambda: "", None, msg, queue=False)
196
  audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot], queue=False)
197
 
198
+ demo.launch()