# Hugging Face Space page header (extraction artifact — not code): "Spaces: Sleeping"
# imports
import os
import random
import tempfile

import gradio as gr
import whisper
from huggingface_hub import InferenceClient
from pydub import AudioSegment
# models
# Remote chat model (Hugging Face Inference API) used for question
# generation and answer feedback.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Local Whisper "base" model for speech-to-text; loaded once at startup.
whisper_model = whisper.load_model("base")
# whisper audio-to-text function
def transcribe_audio(file_path):
    """Convert an audio file to text with Whisper.

    Args:
        file_path: Path to the audio file (any format ffmpeg/pydub can read),
            as supplied by the Gradio ``Audio`` component. May be ``None``
            when nothing was recorded/uploaded.

    Returns:
        The transcribed text, or a string starting with "❌ ERROR:" on any
        failure (the caller branches on that prefix).
    """
    # The Gradio Audio component passes None when no file was provided.
    if not file_path:
        return "❌ ERROR: no audio file provided"
    converted_path = None
    try:
        print(f"📂 Processing audio: {file_path}")
        audio = AudioSegment.from_file(file_path)
        # Write to a unique temp file instead of a fixed "converted.wav" so
        # concurrent sessions don't clobber each other's intermediate audio.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            converted_path = tmp.name
        audio.export(converted_path, format="wav")
        # fp16=False keeps transcription working on CPU-only machines.
        result = whisper_model.transcribe(converted_path, fp16=False)
        return result["text"]
    except Exception as e:
        # Error is returned (not raised) so the chat UI can display it.
        return f"❌ ERROR: {str(e)}"
    finally:
        # Best-effort cleanup of the temporary wav file.
        if converted_path and os.path.exists(converted_path):
            try:
                os.remove(converted_path)
            except OSError:
                pass
# setting up the users profile (step 1)
def set_type(choice, user_profile):
    """Record the chosen interview type and return the next-step prompt."""
    user_profile["interview_type"] = choice
    next_prompt = "Great! What’s your background and what field/role are you aiming for?"
    return next_prompt, user_profile
# step 2
def save_background(info, user_profile):
    """Store the candidate's background/field and explain how to begin."""
    user_profile["field"] = info
    reply = "Awesome! Type 'start' below to begin your interview."
    return reply, user_profile
# generate question using LLM
def generate_question(user_profile):
    """Ask the chat model for one interview question tailored to the profile."""
    system_prompt = (
        f"You are a professional interviewer conducting a "
        f"{user_profile['interview_type']} interview for a candidate in "
        f"{user_profile['field']}. Generate one thoughtful, clear, and concise "
        f"interview question."
    )
    reply = client.chat_completion(
        [{"role": "system", "content": system_prompt}],
        max_tokens=100,
        stream=False,
    )
    return reply.choices[0].message.content.strip()
# generate feedback using LLM
def generate_feedback_llm(user_profile):
    """Build per-question feedback by querying the LLM for each Q/A pair."""
    questions = user_profile.get("questions", [])
    answers = user_profile.get("user_answers", [])
    sections = []
    # Only answered questions are reviewed (zip stops at the shorter list).
    for idx, (question, answer) in enumerate(zip(questions, answers)):
        convo = [
            {"role": "system", "content": f"You are a professional interviewer providing feedback for a candidate's response in a {user_profile['interview_type']} interview for a {user_profile['field']} role."},
            {"role": "user", "content": f"Question: {question}\nAnswer: {answer}\nPlease give specific, constructive feedback."},
        ]
        reply = client.chat_completion(convo, max_tokens=150, stream=False)
        sections.append(f"Question {idx+1}: {reply.choices[0].message.content.strip()}")
    return "\n\n".join(sections)
# step 3: interview loop
def respond(message, chat_history, user_profile):
    """Drive one turn of the interview chat.

    Interprets `message` as a command ('start', 'stop', 'feedback') or, when
    an interview is in progress, as the candidate's answer to the current
    question. Mutates `user_profile` and `chat_history` in place and returns
    the updated `chat_history` for the Chatbot component.

    NOTE: command checks are ordered before the in-progress answer path, so
    typing 'feedback' mid-interview triggers feedback rather than being
    recorded as an answer.
    """
    message_lower = message.strip().lower()
    # Guard: both setup steps must be complete before anything else runs.
    if not user_profile.get("interview_type") or not user_profile.get("field"):
        bot_msg = "Please finish steps 1 and 2 before starting the interview."
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'start':
        # (Re)initialize interview state; 'start' mid-interview restarts it.
        user_profile['questions'] = []
        user_profile['user_answers'] = []
        user_profile['current_q'] = 0
        user_profile['interview_in_progress'] = True
        intro = f"Welcome to your {user_profile['interview_type']} interview for a {user_profile['field']} position. I will ask you up to 10 questions. Type 'stop' anytime to end."
        first_q = generate_question(user_profile)
        user_profile['questions'].append(first_q)
        chat_history.append((message, intro))
        # Empty user slot: the question appears as a standalone bot message.
        chat_history.append(("", f"First question: {first_q}"))
        return chat_history
    if message_lower == 'stop' and user_profile.get("interview_in_progress"):
        user_profile['interview_in_progress'] = False
        bot_msg = "Interview stopped. Type 'feedback' if you'd like me to analyze your answers. Thanks for interviewing with Intervu!"
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'feedback':
        # Safe before any interview: generate_feedback_llm falls back to
        # empty lists and returns an empty string.
        feedback = generate_feedback_llm(user_profile)
        chat_history.append((message, feedback))
        return chat_history
    if user_profile.get("interview_in_progress"):
        # Treat the message as the answer to the most recent question.
        user_profile['user_answers'].append(message)
        user_profile['current_q'] += 1
        if user_profile['current_q'] < 10:
            next_q = generate_question(user_profile)
            user_profile['questions'].append(next_q)
            bot_msg = f"Next question: {next_q}"
        else:
            # Hard cap of 10 questions ends the interview automatically.
            user_profile['interview_in_progress'] = False
            bot_msg = "Interview complete! Type 'feedback' if you'd like me to analyze your answers. Thanks for interviewing with Intervu!"
        chat_history.append((message, bot_msg))
        return chat_history
    # fallback LLM response
    # Reached only when setup is done but no interview is running and the
    # message is not a command: free-form chat with the interviewer persona.
    messages = [
        {"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in {user_profile['field']}."},
        {"role": "user", "content": message}
    ]
    response = client.chat_completion(messages, max_tokens=150, stream=False)
    bot_msg = response.choices[0].message.content.strip()
    chat_history.append((message, bot_msg))
    return chat_history
# handle audio input
def handle_audio(audio_file, chat_history, user_profile):
    """Transcribe a recorded/uploaded answer and feed it into the chat loop."""
    text = transcribe_audio(audio_file)
    # Transcription failures (prefixed "❌") are shown directly in the chat
    # instead of being passed on as an interview answer.
    if text.startswith("❌"):
        chat_history.append(("Audio input", text))
        return chat_history
    return respond(text, chat_history, user_profile)
# UI
with gr.Blocks() as demo:
    # Per-session state: profile dict mutated in place by the handlers, and
    # the (user, bot) tuple list backing the Chatbot component.
    user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
    chat_history = gr.State([])
    gr.Markdown("# Welcome to Intervu")
    gr.Image(value="images.JPEG", show_label=False, width=200)
    gr.Markdown("### Step 1: Choose Interview Type")
    with gr.Row():
        with gr.Column():
            btn1 = gr.Button("Technical")
            btn2 = gr.Button("Competency-Based Interview")
            btn3 = gr.Button("Case")
    type_output = gr.Textbox(label="Bot response", interactive=False)
    # NOTE(review): each click feeds set_type via a hidden Textbox holding a
    # constant; a lambda closing over the string would avoid the extra
    # components — confirm before changing, as it alters the component tree.
    btn1.click(set_type, inputs=[gr.Textbox(value="Technical", visible=False), user_profile], outputs=[type_output, user_profile])
    btn2.click(set_type, inputs=[gr.Textbox(value="Competency-Based Interview", visible=False), user_profile], outputs=[type_output, user_profile])
    btn3.click(set_type, inputs=[gr.Textbox(value="Case", visible=False), user_profile], outputs=[type_output, user_profile])
    gr.Markdown("### Step 2: Enter Your Background")
    background = gr.Textbox(label="Your background and field/goal")
    background_btn = gr.Button("Submit")
    background_output = gr.Textbox(label="Bot response", interactive=False)
    background_btn.click(save_background, inputs=[background, user_profile], outputs=[background_output, user_profile])
    gr.Markdown("### Step 3: Start Interview")
    chatbot = gr.Chatbot(label="Interview Bot")
    with gr.Row():
        msg = gr.Textbox(label="Your message")
        audio_input = gr.Audio(type="filepath", label="🎧 Upload or Record your answer")
    with gr.Row():
        send_btn = gr.Button("Send Text")
        audio_btn = gr.Button("Send Audio")
    # NOTE(review): two handlers are bound to the same button — respond reads
    # msg, then a second handler clears it. With queue=False ordering follows
    # registration order, but verify msg is not cleared before respond reads it.
    send_btn.click(respond, inputs=[msg, chat_history, user_profile], outputs=[chatbot], queue=False)
    send_btn.click(lambda: "", None, msg, queue=False)
    audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot], queue=False)
if __name__ == "__main__":
    demo.launch()