File size: 3,894 Bytes
1629529
 
 
 
 
d459ded
1283b39
d459ded
a0753d0
1629529
 
ca216d4
1283b39
ca216d4
1629529
 
 
 
 
 
92f24a6
 
 
 
1629529
 
ca216d4
1629529
d459ded
1629529
ca216d4
1629529
 
ca216d4
1629529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ca216d4
 
 
1629529
 
d459ded
1283b39
d459ded
1590870
1283b39
 
 
 
 
1629529
1590870
1629529
 
1283b39
1629529
 
 
 
 
 
 
 
 
d459ded
92f24a6
d459ded
1629529
 
 
 
 
 
d459ded
1283b39
d459ded
1629529
874bc57
1629529
 
 
89df719
d459ded
1629529
1283b39
92f24a6
1283b39
 
874bc57
 
1590870
 
874bc57
 
1283b39
874bc57
1283b39
874bc57
1283b39
92f24a6
1283b39
 
 
874bc57
1283b39
874bc57
1283b39
874bc57
1283b39
1629529
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import os
import re
import gradio as gr
from groq import Groq

# -----------------------------
# Groq Initialization
# -----------------------------
# os.environ.get returns None when GROQ_API_KEY is unset, so a missing
# key surfaces as an error from the Groq client, not here — TODO confirm
# the desired failure mode at startup.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# Model used for every completion request in chat_with_llama_stream.
MODEL_NAME = "llama-3.1-8b-instant"

# -----------------------------
# System Prompt
# -----------------------------
# Sent as the "system" role on every request (see chat_with_llama_stream).
# NOTE: the report template below is load-bearing — extract_confidence_score
# parses the "Confidence Score: X/10" line verbatim, so do not reword it.
SYSTEM_PROMPT = """
You are an elite Senior Industry Interviewer and AI Interview Coach from a top global technology company.

Conduct a structured mock interview.

INTERVIEW STRUCTURE:
PHASE 1 – Introduction: Greet, confirm role, warm-up.
PHASE 2 – Technical: One question at a time, adjust difficulty.
PHASE 3 – Behavioral: Teamwork, leadership, problem-solving, failure.
PHASE 4 – Rapid Fire: 5 short questions.

RULES:
- Professional
- Do not break role
- No feedback until candidate types EXACTLY: END INTERVIEW

Generate report in this format:

INTERVIEW SUMMARY REPORT

Candidate Role: <role>
Technical Knowledge Score: X/10
Problem Solving Score: X/10
Communication Score: X/10
Confidence Score: X/10
Overall Hire Recommendation: Strong Hire / Hire / Borderline / Reject

Strengths:
• Bullet points

Areas for Improvement:
• Bullet points

Suggested Preparation Strategy:
• Bullet points

Never explain scoring logic.
Never say you are AI.
"""

# -----------------------------
# Function to stream chat from Groq
# -----------------------------
def chat_with_llama_stream(user_message, backend_messages):
    """Stream an assistant reply from Groq for the given conversation.

    Builds the request message list (system prompt, prior turns, then the
    new user turn), opens a streamed completion, and yields the
    accumulated assistant text after every chunk that carries content.
    """
    # Fresh list of plain dicts for the Groq API — never reuse the
    # caller's objects directly.
    history = [{"role": "system", "content": SYSTEM_PROMPT}]
    history.extend(
        {"role": turn["role"], "content": turn["content"]}
        for turn in backend_messages
    )
    history.append({"role": "user", "content": user_message})

    stream = client.chat.completions.create(
        model=MODEL_NAME,
        messages=history,
        stream=True,
    )

    accumulated = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            accumulated += delta
            yield accumulated

# -----------------------------
# Confidence extractor
# -----------------------------
def extract_confidence_score(text):
    """Extract the 'Confidence Score: X/10' value from model output.

    Parameters
    ----------
    text : str
        Assistant output, typically the final interview summary report.

    Returns
    -------
    int | None
        The score as an integer, or None when no score line is found.
    """
    # Tolerate any casing and optional whitespace around the slash —
    # LLM output frequently drifts from the exact template ("8 / 10",
    # "confidence score: 8/10"); the strict original pattern missed those.
    match = re.search(r"Confidence Score:\s*(\d+)\s*/\s*10", text, re.IGNORECASE)
    if match:
        return int(match.group(1))
    return None

# -----------------------------
# Gradio UI
# -----------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:

    gr.Markdown("## 🎯 AI Mock Interview System")
    gr.Markdown("Powered by Groq + LLaMA 3.1")

    # Transcript rendered as (user, assistant) tuple pairs.
    chatbot = gr.Chatbot([], elem_id="chatbot")
    msg = gr.Textbox(placeholder="Enter your role or answer...", label="Your Input")
    # Read-only numeric display fed by extract_confidence_score.
    confidence_display = gr.Number(label="Confidence Score", interactive=False)
    backend_messages = gr.State([])  # always store as list of dicts

    def respond(user_message, backend_messages, chat_display):
        """Generator callback for msg.submit.

        Streams the assistant reply into the last chat row, then appends
        the finished user/assistant turn to backend_messages and emits
        any confidence score parsed from the reply.

        Yields (chat_display, backend_messages, confidence) so Gradio
        updates the three outputs incrementally on each yield.
        """
        # Guard against None on the first submit before State is populated.
        backend_messages = backend_messages or []
        chat_display = chat_display or []

        # Add user to UI display
        chat_display.append((user_message, ""))

        assistant_text = ""
        for partial in chat_with_llama_stream(user_message, backend_messages):
            assistant_text = partial
            # Update last assistant message in UI
            chat_display[-1] = (user_message, assistant_text)
            # NOTE(review): yielding None here blanks the confidence
            # display during streaming; the final yield restores it.
            yield chat_display, backend_messages, None

        # Add user & assistant to backend messages (dicts only)
        backend_messages.append({"role": "user", "content": user_message})
        backend_messages.append({"role": "assistant", "content": assistant_text})

        # Extract confidence score
        score = extract_confidence_score(assistant_text)
        yield chat_display, backend_messages, score

    msg.submit(respond, [msg, backend_messages, chatbot], [chatbot, backend_messages, confidence_display])

demo.launch()