# NOTE: Hugging Face Spaces status banner captured with this file:
#   Spaces: Runtime error / Runtime error
# (kept as a comment so it does not read as executable code)
import os
import re

import gradio as gr
from groq import Groq

# -----------------------------
# Groq Initialization
# -----------------------------
# API key comes from the environment; os.environ.get returns None when
# GROQ_API_KEY is unset — TODO confirm the Space has the secret configured,
# a missing key is a likely cause of the "Runtime error" banner.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
MODEL_NAME = "llama-3.1-8b-instant"
# -----------------------------
# System Prompt
# -----------------------------
# Sent once per request as the "system" message (see chat_with_llama_stream).
# This text IS runtime behavior — the model follows it verbatim, including the
# "END INTERVIEW" trigger and the report template — so edit with care.
SYSTEM_PROMPT = """
You are an elite Senior Industry Interviewer and AI Interview Coach from a top global technology company.
Conduct a structured mock interview.
INTERVIEW STRUCTURE:
PHASE 1 – Introduction: Greet, confirm role, warm-up.
PHASE 2 – Technical: One question at a time, adjust difficulty.
PHASE 3 – Behavioral: Teamwork, leadership, problem-solving, failure.
PHASE 4 – Rapid Fire: 5 short questions.
RULES:
- Professional
- Do not break role
- No feedback until candidate types EXACTLY: END INTERVIEW
Generate report in this format:
INTERVIEW SUMMARY REPORT
Candidate Role: <role>
Technical Knowledge Score: X/10
Problem Solving Score: X/10
Communication Score: X/10
Confidence Score: X/10
Overall Hire Recommendation: Strong Hire / Hire / Borderline / Reject
Strengths:
• Bullet points
Areas for Improvement:
• Bullet points
Suggested Preparation Strategy:
• Bullet points
Never explain scoring logic.
Never say you are AI.
"""
# -----------------------------
# Function to stream chat from Groq
# -----------------------------
def chat_with_llama_stream(user_message, backend_messages):
    """Stream a Groq chat completion, yielding the accumulated reply text.

    Builds a fresh message list (system prompt + prior turns + the new user
    message), requests a streaming completion, and yields the full response
    accumulated so far each time a non-empty delta arrives.
    """
    # Never mutate the caller's history: assemble a brand-new list of dicts.
    convo = [{"role": "system", "content": SYSTEM_PROMPT}]
    convo.extend(
        {"role": turn["role"], "content": turn["content"]}
        for turn in backend_messages
    )
    convo.append({"role": "user", "content": user_message})

    stream = client.chat.completions.create(
        model=MODEL_NAME,
        messages=convo,
        stream=True,
    )

    accumulated = ""
    for event in stream:
        delta = event.choices[0].delta.content
        if delta:
            accumulated += delta
            yield accumulated
# -----------------------------
# Confidence extractor
# -----------------------------
def extract_confidence_score(text):
    """Pull the integer "Confidence Score: X/10" out of the interview report.

    Args:
        text: The assistant's report text (may be empty or missing the score).

    Returns:
        The score as an int, or None when no score line is found.
    """
    # Tolerant match: LLMs frequently emit markdown bold ("**Confidence
    # Score:** 7/10"), vary casing, or add spaces around the slash. \W* soaks
    # up the colon/asterisks/whitespace between label and number, so every
    # string the old strict pattern matched still matches.
    match = re.search(r"Confidence\s+Score\W*(\d+)\s*/\s*10", text, re.IGNORECASE)
    if match:
        return int(match.group(1))
    return None
# -----------------------------
# Gradio UI
# -----------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## 🎯 AI Mock Interview System")
    gr.Markdown("Powered by Groq + LLaMA 3.1")
    # NOTE(review): tuple-pair Chatbot history is the legacy format; newer
    # Gradio versions deprecate it in favor of type="messages". If the Space
    # pins a recent Gradio, this is a plausible source of the "Runtime error"
    # banner — confirm the installed version.
    chatbot = gr.Chatbot([], elem_id="chatbot")
    msg = gr.Textbox(placeholder="Enter your role or answer...", label="Your Input")
    confidence_display = gr.Number(label="Confidence Score", interactive=False)
    backend_messages = gr.State([])  # always store as list of dicts

    def respond(user_message, backend_messages, chat_display):
        """Generator event handler: streams the assistant reply into the chat.

        Yields (chat_display, backend_messages, score) tuples so Gradio can
        update the Chatbot, the State, and the confidence Number as the
        response streams in. Score is None until the final yield.
        """
        backend_messages = backend_messages or []
        chat_display = chat_display or []
        # Add user to UI display (assistant half filled in as it streams)
        chat_display.append((user_message, ""))
        assistant_text = ""
        for partial in chat_with_llama_stream(user_message, backend_messages):
            assistant_text = partial
            # Update last assistant message in UI
            chat_display[-1] = (user_message, assistant_text)
            yield chat_display, backend_messages, None
        # Add user & assistant to backend messages (dicts only) — done after
        # streaming so the in-flight request never sees the current turn twice.
        backend_messages.append({"role": "user", "content": user_message})
        backend_messages.append({"role": "assistant", "content": assistant_text})
        # Extract confidence score (None unless the final report was emitted)
        score = extract_confidence_score(assistant_text)
        yield chat_display, backend_messages, score

    msg.submit(respond, [msg, backend_messages, chatbot], [chatbot, backend_messages, confidence_display])

demo.launch()