maahikachitagi commited on
Commit
973e767
·
verified ·
1 Parent(s): 52c2de1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -44
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
- # ---- Load and parse questions from knowledge.txt ----
6
  def load_questions(file_path):
7
  with open(file_path, 'r') as f:
8
  data = f.read()
@@ -20,88 +20,104 @@ def load_questions(file_path):
20
 
21
  all_questions = load_questions('knowledge.txt')
22
 
23
- # ---- Tagging interview questions ----
24
  questions_by_type = {
25
  'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
26
- 'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
27
- 'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
28
- 'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
29
- 'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'])],
30
-
31
  'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
32
- "Debugging","Learning Fast","Deadlines","Teamwork","Leadership","Mistake Recovery","Conflict Management","Decision Making"])],
33
-
34
  'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
35
- "A/B Testing","Financial Modeling","Automation","Data Analysis","Regression","Business Opportunity","Stakeholder Alignment"])]
36
  }
37
 
38
- # ---- Hugging Face Client ----
39
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
40
 
41
- # ---- Set interview type ----
42
  def set_type(choice, user_profile):
43
- user_profile["interview_type"] = choice
 
 
 
44
  return "Great! What’s your background and what field/role are you aiming for?", user_profile
45
 
46
- # ---- Save background ----
47
  def save_background(info, user_profile):
48
  user_profile["field"] = info
49
- return "Awesome! Type 'start' below to begin your interview.", user_profile
50
 
51
- # ---- Main respond logic ----
52
  def respond(message, chat_history, user_profile):
53
  message = message.strip().lower()
54
 
 
55
  if not user_profile.get("interview_type") or not user_profile.get("field"):
56
- bot_msg = "Please finish steps 1 and 2 before starting the interview."
57
- chat_history.append((message, bot_msg))
58
  return chat_history
59
 
60
- # Start interview
61
- if message == 'start':
 
 
62
  interview_type = user_profile['interview_type']
63
  selected_questions = questions_by_type.get(interview_type, [])
64
- user_profile['questions'] = selected_questions
65
- user_profile['current_q'] = 0
66
- user_profile['user_answers'] = []
67
  if not selected_questions:
68
- bot_msg = "No questions available for this interview type."
69
- else:
70
- bot_msg = f"First question: {selected_questions[0]['question']}"
71
- chat_history.append((message, bot_msg))
 
 
 
 
 
 
 
 
 
 
72
  return chat_history
73
 
74
- # Stop interview early
75
- if message == 'stop':
76
- user_profile['questions'] = []
77
- bot_msg = "Interview stopped. Type 'start' to begin again or 'feedback' to get feedback."
78
- chat_history.append((message, bot_msg))
79
  return chat_history
80
 
81
  # Handle feedback
82
  if message == 'feedback':
83
- if not user_profile.get("questions"):
84
- chat_history.append((message, "No completed interview found. Please complete the interview first."))
85
  return chat_history
 
86
  feedback = generate_feedback(user_profile)
87
  chat_history.append((message, feedback))
88
  return chat_history
89
 
90
- # Handle interview questions
91
- if user_profile.get("questions"):
92
  q_index = user_profile['current_q']
93
- if q_index < len(user_profile['questions']):
 
 
94
  user_profile['user_answers'].append(message)
95
  user_profile['current_q'] += 1
96
 
97
- if user_profile['current_q'] < len(user_profile['questions']):
98
- next_question = user_profile['questions'][user_profile['current_q']]['question']
99
  chat_history.append((message, f"Next question: {next_question}"))
100
  else:
101
- chat_history.append((message, "Interview complete! Type 'feedback' if you'd like an analysis."))
 
102
  return chat_history
103
 
104
- # General fallback chat using LLM if no interview ongoing
105
  messages = [
106
  {"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in the {user_profile['field']} field."}
107
  ]
@@ -115,7 +131,7 @@ def respond(message, chat_history, user_profile):
115
  chat_history.append((message, bot_msg))
116
  return chat_history
117
 
118
- # ---- Feedback generator ----
119
  def generate_feedback(user_profile):
120
  feedback = []
121
  questions = user_profile.get('questions', [])
@@ -136,9 +152,9 @@ def generate_feedback(user_profile):
136
  feedback.append(fb)
137
  return "\n".join(feedback)
138
 
139
- # ---- Gradio interface ----
140
  with gr.Blocks() as demo:
141
- user_profile = gr.State({"interview_type": "", "field": ""})
142
  chat_history = gr.State([])
143
 
144
  gr.Markdown("# 🎀 Welcome to Intervu")
 
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
+ # ---- Load and parse questions ----
6
  def load_questions(file_path):
7
  with open(file_path, 'r') as f:
8
  data = f.read()
 
20
 
21
  all_questions = load_questions('knowledge.txt')
22
 
23
+ # ---- Categorize questions ----
24
# Keyword buckets per interview type. Matching is done against the
# LOWERCASED question text, so every keyword must itself be lowercase.
# BUG FIX: the 'Competency-Based Interview' and 'Case' keywords were
# Title-Case ("Debugging", "A/B Testing") and therefore could never match
# `q['question'].lower()` — they have been lowercased here.
questions_by_type = {
    'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
        'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
        'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
        'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'])],

    'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        'debugging', 'learning fast', 'deadlines', 'teamwork', 'leadership',
        'mistake recovery', 'conflict management', 'decision making'])],

    'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        'a/b testing', 'financial modeling', 'automation', 'data analysis',
        'regression', 'business opportunity', 'stakeholder alignment'])]
}
37
 
38
+ # ---- HF Client ----
39
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
40
 
41
+ # ---- Interview type selection ----
42
def set_type(choice, user_profile):
    """Record the chosen interview type and reset the session state.

    Args:
        choice: Interview-type label selected in the UI (e.g. 'Technical').
        user_profile: Mutable dict holding this session's profile/state;
            updated in place.

    Returns:
        Tuple of (prompt_text, user_profile) for the Gradio callback.
    """
    user_profile.update({
        "interview_type": choice,
        "state": "idle",  # reset so 'start' is accepted for a fresh run
    })
    # FIX: repaired mojibake apostrophe ("What’s") in the prompt string.
    return "Great! What's your background and what field/role are you aiming for?", user_profile
48
 
49
+ # ---- Background ----
50
def save_background(info, user_profile):
    """Store the candidate's background/field on the profile (in place).

    Returns a (reply_text, user_profile) pair for the Gradio callback.
    """
    reply = "Awesome! Type 'start' below when you're ready to begin your interview."
    user_profile["field"] = info
    return reply, user_profile
53
 
54
+ # ---- Main Respond Logic ----
55
  def respond(message, chat_history, user_profile):
56
  message = message.strip().lower()
57
 
58
+ # Safety: Ensure interview type + field are set
59
  if not user_profile.get("interview_type") or not user_profile.get("field"):
60
+ chat_history.append((message, "Please finish steps 1 and 2 first."))
 
61
  return chat_history
62
 
63
+ state = user_profile.get("state", "idle")
64
+
65
+ # Handle start
66
+ if message == 'start' and state == 'idle':
67
  interview_type = user_profile['interview_type']
68
  selected_questions = questions_by_type.get(interview_type, [])
69
+
 
 
70
  if not selected_questions:
71
+ chat_history.append((message, "No questions found for this interview type."))
72
+ return chat_history
73
+
74
+ user_profile.update({
75
+ 'questions': selected_questions,
76
+ 'current_q': 0,
77
+ 'user_answers': [],
78
+ 'state': 'running'
79
+ })
80
+
81
+ intro_msg = f"Great, let's begin your {interview_type} interview for the {user_profile['field']} role.\nI'll be asking you a few questions to assess your knowledge. Take your time!"
82
+ first_q = selected_questions[0]['question']
83
+ chat_history.append((message, intro_msg))
84
+ chat_history.append(("", f"First question: {first_q}"))
85
  return chat_history
86
 
87
+ # Handle stop
88
+ if message == 'stop' and state == 'running':
89
+ user_profile["state"] = "stopped"
90
+ chat_history.append((message, "Interview stopped. Type 'start' to begin again or 'feedback' if you'd like an analysis (if available)."))
 
91
  return chat_history
92
 
93
  # Handle feedback
94
  if message == 'feedback':
95
+ if state != 'completed':
96
+ chat_history.append((message, "Feedback is only available after completing the interview."))
97
  return chat_history
98
+
99
  feedback = generate_feedback(user_profile)
100
  chat_history.append((message, feedback))
101
  return chat_history
102
 
103
+ # Handle interview question flow
104
+ if state == 'running':
105
  q_index = user_profile['current_q']
106
+ questions = user_profile['questions']
107
+
108
+ if q_index < len(questions):
109
  user_profile['user_answers'].append(message)
110
  user_profile['current_q'] += 1
111
 
112
+ if user_profile['current_q'] < len(questions):
113
+ next_question = questions[user_profile['current_q']]['question']
114
  chat_history.append((message, f"Next question: {next_question}"))
115
  else:
116
+ user_profile['state'] = 'completed'
117
+ chat_history.append((message, "✅ Interview complete! You can type 'feedback' to receive an analysis."))
118
  return chat_history
119
 
120
+ # Handle idle/stopped fallback small talk using LLM
121
  messages = [
122
  {"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in the {user_profile['field']} field."}
123
  ]
 
131
  chat_history.append((message, bot_msg))
132
  return chat_history
133
 
134
+ # ---- Feedback ----
135
  def generate_feedback(user_profile):
136
  feedback = []
137
  questions = user_profile.get('questions', [])
 
152
  feedback.append(fb)
153
  return "\n".join(feedback)
154
 
155
+ # ---- Gradio UI ----
156
  with gr.Blocks() as demo:
157
+ user_profile = gr.State({"interview_type": "", "field": "", "state": "idle"})
158
  chat_history = gr.State([])
159
 
160
  gr.Markdown("# 🎤 Welcome to Intervu")