maahikachitagi committed on
Commit
000de83
·
verified ·
1 Parent(s): eee904b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -88
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
- # ---- Load and parse questions ----
6
  def load_questions(file_path):
7
  with open(file_path, 'r') as f:
8
  data = f.read()
@@ -20,109 +20,116 @@ def load_questions(file_path):
20
 
21
  all_questions = load_questions('knowledge.txt')
22
 
23
- # ---- Categorize questions ----
 
24
  questions_by_type = {
25
  'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
26
- 'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
27
- 'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
28
- 'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
29
- 'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'])],
30
-
31
  'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
32
- "Debugging","Learning Fast","Deadlines","Teamwork","Leadership","Mistake Recovery","Conflict Management","Decision Making"])],
33
-
 
 
 
 
 
 
 
34
  'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
35
- "A/B Testing","Financial Modeling","Automation","Data Analysis","Regression","Business Opportunity","Stakeholder Alignment"])]
 
 
 
 
 
 
36
  }
37
 
38
- # ---- HF Client ----
 
39
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
40
 
41
  # ---- Interview type selection ----
42
  def set_type(choice, user_profile):
43
- user_profile.update({
44
- "interview_type": choice,
45
- "state": "idle",
46
- "questions": [],
47
- "current_q": 0,
48
- "user_answers": [],
49
- "field": ""
50
- })
51
  return "Great! What’s your background and what field/role are you aiming for?", user_profile
52
 
53
- # ---- Background ----
54
  def save_background(info, user_profile):
55
  user_profile["field"] = info
56
- return "Awesome! Type 'start' below when you're ready to begin your interview.", user_profile
57
 
58
- # ---- Main Respond Logic ----
59
  def respond(message, chat_history, user_profile):
60
- msg = message.strip().lower()
61
- state = user_profile.get("state", "idle")
62
-
63
- # Safety: ensure setup
64
  if not user_profile.get("interview_type") or not user_profile.get("field"):
65
- chat_history.append((message, "Please finish steps 1 and 2 first."))
 
66
  return chat_history
67
 
68
- # START command
69
- if msg == "start" and state == "idle":
70
- selected_questions = questions_by_type.get(user_profile['interview_type'], [])
71
-
 
 
 
72
  if not selected_questions:
73
- chat_history.append((message, "No questions found for this interview type."))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  return chat_history
75
 
76
- user_profile["questions"] = selected_questions
77
- user_profile["current_q"] = 0
78
- user_profile["user_answers"] = []
79
- user_profile["state"] = "intro"
80
 
81
- intro_msg = f"Great, let's begin your {user_profile['interview_type']} interview for the {user_profile['field']} role.\nI'll be asking you several questions to assess your knowledge. Feel free to take your time answering."
82
- chat_history.append((message, intro_msg))
83
- chat_history.append(("", "Type 'next' when you're ready to begin."))
84
- return chat_history
85
 
86
- # INTRO β†’ waiting for user to say "next"
87
- if msg == "next" and state == "intro":
88
- user_profile["state"] = "interviewing"
89
- first_question = user_profile['questions'][0]['question']
90
- chat_history.append((message, f"First question: {first_question}"))
91
- return chat_history
92
 
93
- # STOP anytime during interviewing
94
- if msg == "stop" and state in ["intro", "interviewing"]:
95
- user_profile["state"] = "stopped"
96
- chat_history.append((message, "Interview stopped. You may type 'start' to begin again."))
97
  return chat_history
 
98
 
99
- # FEEDBACK command
100
- if msg == "feedback":
101
- if state != "completed":
102
- chat_history.append((message, "Feedback is only available after completing the interview."))
103
- return chat_history
104
  feedback = generate_feedback(user_profile)
105
  chat_history.append((message, feedback))
106
  return chat_history
107
 
108
- # INTERVIEWING STATE LOGIC
109
- if state == "interviewing":
110
- q_index = user_profile["current_q"]
111
- questions = user_profile["questions"]
112
-
113
- if q_index < len(questions):
114
- user_profile["user_answers"].append(message)
115
- user_profile["current_q"] += 1
116
-
117
- if user_profile["current_q"] < len(questions):
118
- next_q = questions[user_profile["current_q"]]["question"]
119
- chat_history.append((message, f"Next question: {next_q}"))
120
- else:
121
- user_profile["state"] = "completed"
122
- chat_history.append((message, "βœ… Interview complete! Type 'feedback' if you'd like a performance analysis."))
123
- return chat_history
124
-
125
- # ANY OTHER MESSAGE (idle, stopped, etc) β†’ small talk fallback using LLM
126
  messages = [
127
  {"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in the {user_profile['field']} field."}
128
  ]
@@ -136,18 +143,13 @@ def respond(message, chat_history, user_profile):
136
  chat_history.append((message, bot_msg))
137
  return chat_history
138
 
139
- # ---- Feedback ----
140
  def generate_feedback(user_profile):
141
  feedback = []
142
  questions = user_profile.get('questions', [])
143
  answers = user_profile.get('user_answers', [])
144
 
145
- num_questions = min(len(questions), len(answers))
146
- if num_questions == 0:
147
- return "No completed interview found."
148
-
149
- for i in range(num_questions):
150
- user_ans = answers[i]
151
  correct_answers = questions[i]['answers']
152
  match = any(ans.lower() in user_ans.lower() for ans in correct_answers)
153
  if match:
@@ -157,21 +159,19 @@ def generate_feedback(user_profile):
157
  feedback.append(fb)
158
  return "\n".join(feedback)
159
 
160
- # ---- Gradio UI ----
161
  with gr.Blocks() as demo:
162
- user_profile = gr.State({
163
- "interview_type": "", "field": "", "state": "idle",
164
- "questions": [], "current_q": 0, "user_answers": []
165
- })
166
  chat_history = gr.State([])
167
 
168
  gr.Markdown("# 🎀 Welcome to Intervu")
169
 
170
  gr.Markdown("### Step 1: Choose Interview Type")
171
  with gr.Row():
172
- btn1 = gr.Button("Technical")
173
- btn2 = gr.Button("Competency-Based Interview")
174
- btn3 = gr.Button("Case")
 
175
  type_output = gr.Textbox(label="Bot response", interactive=False)
176
 
177
  btn1.click(set_type, inputs=[gr.Textbox(value="Technical", visible=False), user_profile], outputs=[type_output, user_profile])
 
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
+ # ---- Load and parse questions from knowledge.txt ----
6
  def load_questions(file_path):
7
  with open(file_path, 'r') as f:
8
  data = f.read()
 
20
 
21
  all_questions = load_questions('knowledge.txt')
22
 
23
# ---- Simple way to assign questions to interview types ----
# You can replace this later with better tagging.
# BUG FIX: questions are compared via q['question'].lower(), so every keyword
# must be lowercase too — the previous Title-Case keywords ("Debugging",
# "A/B Testing", ...) could never match a lowercased string, leaving the
# 'Competency-Based Interview' and 'Case' categories permanently empty.

def _matches_any(question, keywords):
    # Case-insensitive membership test of any keyword in the question text.
    text = question['question'].lower()
    return any(keyword in text for keyword in keywords)

questions_by_type = {
    'Technical': [q for q in all_questions if _matches_any(q, [
        'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
        'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
        'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
        'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'])],

    'Competency-Based Interview': [q for q in all_questions if _matches_any(q, [
        'debugging',
        'learning fast',
        'deadlines',
        'teamwork',
        'leadership',
        'mistake recovery',
        'conflict management',
        'decision making'])],

    'Case': [q for q in all_questions if _matches_any(q, [
        'a/b testing',
        'financial modeling',
        'automation',
        'data analysis',
        'regression',
        'business opportunity',
        'stakeholder alignment'])],
}
51
 
52
+
53
+ # ---- Hugging Face Client ----
54
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
55
 
56
# ---- Interview type selection ----
def set_type(choice, user_profile):
    """Record which interview type the candidate picked.

    Mutates `user_profile` in place and returns the follow-up prompt
    together with the updated profile (Gradio state round-trip).
    """
    user_profile["interview_type"] = choice
    next_prompt = "Great! What’s your background and what field/role are you aiming for?"
    return next_prompt, user_profile
60
 
61
# ---- Save background ----
def save_background(info, user_profile):
    """Store the candidate's background/field description in the profile.

    Returns the instruction for starting the interview plus the updated
    profile (Gradio state round-trip).
    """
    user_profile["field"] = info
    confirmation = "Awesome! Type 'start' below to begin your interview."
    return confirmation, user_profile
65
 
66
+ # ---- Main respond logic ----
67
  def respond(message, chat_history, user_profile):
 
 
 
 
68
  if not user_profile.get("interview_type") or not user_profile.get("field"):
69
+ bot_msg = "Please finish steps 1 and 2 before starting the interview."
70
+ chat_history.append((message, bot_msg))
71
  return chat_history
72
 
73
+ # Start interview logic
74
+ if message.strip().lower() == 'start':
75
+ interview_type = user_profile['interview_type']
76
+ selected_questions = questions_by_type.get(interview_type, [])
77
+ user_profile['questions'] = selected_questions
78
+ user_profile['current_q'] = 0
79
+ user_profile['user_answers'] = []
80
  if not selected_questions:
81
+ bot_msg = "No questions available for this interview type."
82
+ else:
83
+ bot_msg = f"First question: {selected_questions[0]['question']}"
84
+ chat_history.append((message, bot_msg))
85
+ return chat_history
86
+
87
+ # If interview is ongoing
88
+ # if user_profile.get("questions"):
89
+ # q_index = user_profile['current_q']
90
+ # user_profile['user_answers'].append(message)
91
+
92
+ # q_index += 1
93
+ # user_profile['current_q'] = q_index
94
+
95
+ # if q_index < len(user_profile['questions']):
96
+ # bot_msg = f"Next question: {user_profile['questions'][q_index]['question']}"
97
+ # else:
98
+ # bot_msg = "Interview complete! Type 'feedback' if you'd like me to analyze your answers."
99
+ # chat_history.append((message, bot_msg))
100
+ # return chat_history
101
+ if user_profile.get("questions"):
102
+
103
+ # --- NEW STOP LOGIC ---
104
+ if message.strip().lower() == 'stop':
105
+ bot_msg = "Thank you for chatting with Intervu! The interview has been stopped. Type 'feedback' if you'd like me to analyze your answers."
106
+ chat_history.append((message, bot_msg))
107
+ user_profile['questions'] = [] # clear questions list to stop
108
  return chat_history
109
 
110
+ # Existing interview logic continues here:
111
+ q_index = user_profile['current_q']
112
+ user_profile['user_answers'].append(message)
 
113
 
114
+ q_index += 1
115
+ user_profile['current_q'] = q_index
 
 
116
 
117
+ if q_index < len(user_profile['questions']):
118
+ bot_msg = f"Next question: {user_profile['questions'][q_index]['question']}"
119
+ else:
120
+ bot_msg = "Interview complete! Type 'feedback' if you'd like me to analyze your answers."
 
 
121
 
122
+ chat_history.append((message, bot_msg))
 
 
 
123
  return chat_history
124
+
125
 
126
+ # Handle feedback request
127
+ if message.strip().lower() == 'feedback':
 
 
 
128
  feedback = generate_feedback(user_profile)
129
  chat_history.append((message, feedback))
130
  return chat_history
131
 
132
+ # Default fallback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  messages = [
134
  {"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in the {user_profile['field']} field."}
135
  ]
 
143
  chat_history.append((message, bot_msg))
144
  return chat_history
145
 
146
+ # ---- Simple feedback function (keyword based for now) ----
147
  def generate_feedback(user_profile):
148
  feedback = []
149
  questions = user_profile.get('questions', [])
150
  answers = user_profile.get('user_answers', [])
151
 
152
+ for i, user_ans in enumerate(answers):
 
 
 
 
 
153
  correct_answers = questions[i]['answers']
154
  match = any(ans.lower() in user_ans.lower() for ans in correct_answers)
155
  if match:
 
159
  feedback.append(fb)
160
  return "\n".join(feedback)
161
 
162
+ # ---- Gradio Interface ----
163
  with gr.Blocks() as demo:
164
+ user_profile = gr.State({"interview_type": "", "field": ""})
 
 
 
165
  chat_history = gr.State([])
166
 
167
  gr.Markdown("# 🎀 Welcome to Intervu")
168
 
169
  gr.Markdown("### Step 1: Choose Interview Type")
170
  with gr.Row():
171
+ with gr.Column():
172
+ btn1 = gr.Button("Technical")
173
+ btn2 = gr.Button("Competency-Based Interview")
174
+ btn3 = gr.Button("Case")
175
  type_output = gr.Textbox(label="Bot response", interactive=False)
176
 
177
  btn1.click(set_type, inputs=[gr.Textbox(value="Technical", visible=False), user_profile], outputs=[type_output, user_profile])