# Intervu — Gradio-based mock-interview practice app (HuggingFace Space).
import re
import random

import gradio as gr
from huggingface_hub import InferenceClient
# Load questions (your original backend)
def load_questions(file_path):
    """Parse an interview question bank from a plain-text file.

    Expected file format — a sequence of blocks:
        Question: <text>
        Possible Answers:
        1. <answer>
        2. <answer>

    Returns a list of dicts of the form
    ``{'question': str, 'answers': list[str]}``.

    A block missing its "Possible Answers:" section yields an empty
    answers list instead of crashing (the original indexed ``parts[1]``
    unconditionally and raised IndexError on malformed blocks).
    """
    # Explicit encoding so the question bank reads the same on every platform.
    with open(file_path, 'r', encoding='utf-8') as f:
        data = f.read()
    # Everything before the first "Question:" marker is ignored.
    question_blocks = re.split(r'Question:\s*', data)[1:]
    questions = []
    for block in question_blocks:
        # partition never raises: answers_text is '' when the marker is absent.
        question_text, _, answers_text = block.partition('Possible Answers:')
        # Split on the "1. " / "2. " numbering and drop empty fragments.
        possible_answers = [
            ans.strip()
            for ans in re.split(r'\d+\.\s+', answers_text.strip())
            if ans.strip()
        ]
        questions.append({'question': question_text.strip(), 'answers': possible_answers})
    return questions
# Load the full question bank once at import time from the bundled text file.
all_questions = load_questions('knowledge.txt')
# Question categorization (same as your existing code)
# Keyword buckets used to route each loaded question into an interview category.
_CATEGORY_KEYWORDS = {
    'Technical': [
        'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
        'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
        'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
        'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'],
    'Competency-Based Interview': [
        "debugging", "learning", "deadlines", "teamwork", "leadership", "mistake", "conflict", "decision"],
    'Case': [
        "testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"],
}

# A question lands in every category whose keyword list matches its text.
questions_by_type = {
    category: [q for q in all_questions
               if any(keyword in q['question'].lower() for keyword in keywords)]
    for category, keywords in _CATEGORY_KEYWORDS.items()
}
# Hosted inference client used for the free-form interviewer chat fallback.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Backend logic (all functions same as before — no changes)
def set_type(choice, user_profile):
    """Record the chosen interview type on the shared profile state.

    Returns the bot's follow-up prompt plus the updated profile dict.
    """
    user_profile["interview_type"] = choice
    prompt = "Great! What’s your background and what field/role are you aiming for?"
    return prompt, user_profile
def save_background(info, user_profile):
    """Store the candidate's background/target role on the profile state.

    Returns the bot's confirmation message plus the updated profile dict.
    """
    user_profile["field"] = info
    confirmation = "Awesome! Type 'start' below to begin your interview."
    return confirmation, user_profile
def respond(message, chat_history, user_profile):
    """Drive the interview chat.

    Handles the 'start' / 'stop' / 'feedback' commands, records answers
    while an interview is in progress, and otherwise falls back to the
    LLM for free-form interviewer conversation.

    Args:
        message: the user's raw input text.
        chat_history: list of (user, bot) message tuples; mutated in place.
        user_profile: per-session state dict; mutated in place.

    Returns:
        The updated chat_history list.
    """
    message_lower = message.strip().lower()
    # Steps 1 and 2 (interview type + background) must be completed first.
    if not user_profile.get("interview_type") or not user_profile.get("field"):
        bot_msg = "Please finish steps 1 and 2 before starting the interview."
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'start':
        interview_type = user_profile['interview_type']
        # Copy before shuffling: questions_by_type.get() returns the shared
        # module-level list, and random.shuffle mutates in place — the
        # original permanently reordered the category for every session.
        selected_questions = list(questions_by_type.get(interview_type, []))
        random.shuffle(selected_questions)
        selected_questions = selected_questions[:10]
        if not selected_questions:
            # Guard: the original indexed selected_questions[0] and raised
            # IndexError when a category had no matching questions.
            bot_msg = "Sorry, no questions are available for this interview type."
            chat_history.append((message, bot_msg))
            return chat_history
        user_profile['questions'] = selected_questions
        user_profile['current_q'] = 0
        user_profile['user_answers'] = []
        user_profile['interview_in_progress'] = True
        intro = f"Welcome to your {interview_type} interview for a {user_profile['field']} position. I will ask you up to 10 questions. Type 'stop' anytime to end."
        first_q = f"First question: {selected_questions[0]['question']}"
        chat_history.append((message, intro))
        chat_history.append(("", first_q))
        return chat_history
    if message_lower == 'stop' and user_profile.get("interview_in_progress"):
        user_profile['interview_in_progress'] = False
        bot_msg = "Interview stopped. Type 'feedback' if you'd like me to analyze your answers."
        chat_history.append((message, bot_msg))
        return chat_history
    if user_profile.get("interview_in_progress"):
        # Treat the message as the answer to the current question, then
        # either advance to the next question or finish the interview.
        q_index = user_profile['current_q']
        user_profile['user_answers'].append(message)
        q_index += 1
        user_profile['current_q'] = q_index
        if q_index < len(user_profile['questions']):
            bot_msg = f"Next question: {user_profile['questions'][q_index]['question']}"
        else:
            user_profile['interview_in_progress'] = False
            bot_msg = "Interview complete! Type 'feedback' if you'd like me to analyze your answers."
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'feedback':
        feedback = generate_feedback(user_profile)
        chat_history.append((message, feedback))
        return chat_history
    # Normal chatbot conversation: replay the transcript to the LLM with an
    # interviewer system prompt and append its reply.
    messages = [{"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in {user_profile['field']}."}]
    for q, a in chat_history:
        messages.append({"role": "user", "content": q})
        messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=150, stream=False)
    bot_msg = response.choices[0].message.content
    chat_history.append((message, bot_msg))
    return chat_history
def generate_feedback(user_profile):
    """Build a per-question feedback summary from the recorded answers.

    For each recorded answer, checks case-insensitively whether any of the
    question's expected answers appears as a substring of the user's answer.

    Returns one line per answered question joined by newlines, or "" when
    no answers were recorded.
    """
    questions = user_profile.get('questions', [])
    answers = user_profile.get('user_answers', [])
    feedback = []
    # zip() guards against a questions/answers length mismatch — the
    # original indexed questions[i] and could raise IndexError.
    for i, (question, user_ans) in enumerate(zip(questions, answers), start=1):
        correct_answers = question['answers']
        match = any(ans.lower() in user_ans.lower() for ans in correct_answers)
        if match:
            feedback.append(f"Question {i}: ✅ Good job!")
        else:
            # Guard [0]: a question may have an empty answer list.
            hint = correct_answers[0] if correct_answers else "(no reference answer available)"
            feedback.append(f"Question {i}: ❌ Missed key points: {hint}")
    return "\n".join(feedback)
# The new Intervu 2.0 UI with your design!
# Gradio Blocks layout: step 1 (type buttons) → step 2 (background) → chat loop.
with gr.Blocks(css="""
body { background-color: #f2f4f8; font-family: 'Segoe UI', sans-serif; }
h1 { text-align: center; color: #2c3e50; }
img { display: block; margin: auto; width: 100px; border-radius: 20px; }
button { font-size: 16px; padding: 10px 20px; border-radius: 10px; border: none; background-color: #3B82F6; color: white; }
button:hover { background-color: #2563EB; }
.gr-chatbot { background-color: white; border-radius: 15px; padding: 20px; }
""") as demo:
    gr.Markdown("")
    # Per-session state shared by all callbacks: the candidate profile and
    # the chat transcript (list of (user, bot) tuples).
    user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
    chat_history = gr.State([])
    # Header
    gr.Markdown("""
<div style='text-align:center;'>
<img src="Untitled design.jpg">
<h1>Welcome to <b>Intervu</b></h1>
</div>
""")
    # Step 1 - Choose Interview Type
    gr.Markdown("### Step 1: Choose Interview Type")
    with gr.Row():
        btn1 = gr.Button("Technical")
        btn2 = gr.Button("Competency-Based Interview")
        btn3 = gr.Button("Case")
    type_output = gr.Textbox(label="Bot response", interactive=False)
    # NOTE(review): hidden Textboxes act as constant-valued inputs so each button
    # passes its category name into set_type — confirm this pattern behaves on the
    # deployed Gradio version (a per-button lambda would avoid the hidden components).
    btn1.click(set_type, inputs=[gr.Textbox(value="Technical", visible=False), user_profile], outputs=[type_output, user_profile])
    btn2.click(set_type, inputs=[gr.Textbox(value="Competency-Based Interview", visible=False), user_profile], outputs=[type_output, user_profile])
    btn3.click(set_type, inputs=[gr.Textbox(value="Case", visible=False), user_profile], outputs=[type_output, user_profile])
    # Step 2 - Enter Background
    gr.Markdown("### Step 2: Enter Your Background")
    background = gr.Textbox(label="Your background and field/goal")
    background_btn = gr.Button("Submit")
    background_output = gr.Textbox(label="Bot response", interactive=False)
    background_btn.click(save_background, inputs=[background, user_profile], outputs=[background_output, user_profile])
    # Step 3 - Chatbot Mode Selection
    gr.Markdown("### Choose Chat Mode")
    with gr.Row():
        gr.Button("Text-Based")  # You can build voice & webcam later :)
    # Chat interface
    chatbot = gr.Chatbot(label="Interview Chat")
    msg = gr.Textbox(label="Type 'start' to begin")
    send_btn = gr.Button("Send")
    # respond mutates the chat_history State in place and returns it for display.
    send_btn.click(respond, inputs=[msg, chat_history, user_profile], outputs=[chatbot], queue=False)
    # Second handler on the same button clears the input box after sending.
    send_btn.click(lambda: "", None, msg, queue=False)
demo.launch()