File size: 6,672 Bytes
ced8c16
b371099
 
 
 
 
ced8c16
b68f858
b371099
 
 
 
 
 
 
6b1c2ef
b371099
 
 
 
 
ced8c16
b371099
 
 
 
 
 
 
 
 
 
 
ced8c16
b371099
ced8c16
 
b371099
 
ced8c16
b371099
 
 
ced8c16
b371099
 
b68f858
 
 
b371099
 
ced8c16
 
b371099
 
 
5c0808b
 
 
 
b371099
b68f858
5c0808b
 
b371099
b68f858
b371099
 
ced8c16
b371099
b68f858
 
 
b371099
b68f858
b371099
ced8c16
b371099
 
 
ced8c16
b371099
b68f858
 
b371099
ced8c16
b68f858
 
b371099
 
 
 
b68f858
ced8c16
b371099
 
ced8c16
b68f858
b371099
b68f858
b371099
 
ced8c16
b371099
b68f858
b371099
b68f858
b371099
b68f858
b371099
b68f858
 
 
ced8c16
 
 
 
b68f858
 
 
 
 
 
 
ced8c16
b68f858
b371099
 
ced8c16
b371099
ced8c16
b68f858
ced8c16
 
b68f858
b371099
 
ced8c16
 
 
 
 
b68f858
 
ced8c16
b68f858
ced8c16
b68f858
 
ced8c16
 
b68f858
 
ced8c16
 
 
b68f858
 
b371099
ced8c16
 
b68f858
b371099
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
# βœ… FINAL MERGED & ENHANCED CODE: AI Interview Bot (Jason Voice, One-by-One, Stylish UI)

import os
import gradio as gr
import whisper
import tempfile
import re
from dotenv import load_dotenv
from TTS.api import TTS
import google.generativeai as genai

# -----------------------------
# ✅ CONFIGURE GEMINI API
# -----------------------------
# Load variables from a .env file; `load_dotenv` was imported above but never
# actually invoked, so .env keys were silently ignored.
load_dotenv()

# SECURITY FIX: the API key was hard-coded in source (and therefore leaked to
# anyone with repo access — it should be revoked). Read it from the
# environment / .env instead and fail fast with a clear message if missing.
_gemini_api_key = os.getenv("GEMINI_API_KEY")
if not _gemini_api_key:
    raise RuntimeError(
        "GEMINI_API_KEY is not set. Add it to your environment or a .env file."
    )
genai.configure(api_key=_gemini_api_key)
model = genai.GenerativeModel("gemini-1.5-flash-latest")

# -----------------------------
# ✅ LOAD MODELS
# -----------------------------
# Whisper "base" checkpoint — transcribes the candidate's recorded answers (STEP 4).
asr_model = whisper.load_model("base")
# Coqui TTS voice ("Jason") — reads each question aloud (STEP 3). CPU-only, silent download.
jason_tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC", progress_bar=False, gpu=False)

# -----------------------------
# ✅ GLOBAL STATE
# -----------------------------
# Module-level mutable state shared by the Gradio callbacks below.
# NOTE(review): this makes the app single-session — concurrent users would
# clobber each other's state. Fine for a local demo; confirm before deploying.
candidate_name = ""          # set by save_user_info()
field = ""                   # interview field; read by generate_questions()
interview_questions = []     # the 12 numbered questions returned by Gemini
current_question_index = 0   # index of the question currently being asked
feedback_summary = []        # list of {"question", "answer", "feedback"} dicts

# -----------------------------
# ✅ STEP 1: Collect Candidate Info
# -----------------------------
def save_user_info(name, user_field):
    """Store the candidate's name and chosen field, returning a welcome message."""
    global candidate_name, field
    candidate_name, field = name, user_field
    return (
        f"Welcome {name}! Jason will conduct your mock interview "
        f"for the {user_field} internship."
    )

# -----------------------------
# ✅ STEP 2: Generate Questions with Strict Format
# -----------------------------
def generate_questions():
    """Ask Gemini for exactly 12 numbered interview questions for the chosen field.

    Resets the per-interview state, then returns a tuple of
    (status_message, first_question); the question string is empty on failure.
    """
    global field, interview_questions, current_question_index, feedback_summary
    # Start a fresh interview: rewind the pointer and drop old answers.
    current_question_index = 0
    feedback_summary = []

    prompt = (
        f"You are a professional interviewer. Generate exactly 12 internship questions (7 technical + 5 behavioral) for the field of {field}.\n"
        f"Output only the questions, each starting with a number (e.g., 1. ..., 2. ..., ..., 12.). Do not include headings or explanations."
    )
    try:
        raw_output = model.generate_content(prompt).text.strip()
        # Keep only lines shaped like "<number>. <question text>".
        interview_questions = [
            matched.strip()
            for matched in re.findall(r"^\d+\.\s+.*", raw_output, re.MULTILINE)
        ]
        if len(interview_questions) != 12:
            return "⚠️ Gemini did not return exactly 12 questions. Please try again.", ""
        return "✅ Questions generated successfully!", interview_questions[0]
    except Exception as e:
        return f"Error: {str(e)}", ""

# -----------------------------
# ✅ STEP 3: Jason Speaks Current Question
# -----------------------------
def speak_current_question():
    """Synthesize the current question with Jason's TTS voice.

    Returns (question_text, wav_path), or ("No more questions.", None) when the
    question list is exhausted or empty.
    """
    if not interview_questions or current_question_index >= len(interview_questions):
        return "No more questions.", None

    question = interview_questions[current_question_index]
    # BUG FIX: NamedTemporaryFile(delete=False) kept an open OS handle on the
    # file while jason_tts wrote to the same path — that fails outright on
    # Windows and leaked the handle everywhere else. mkstemp + os.close
    # releases the handle before TTS writes.
    # NOTE(review): the .wav files are never deleted afterwards (same as the
    # original behavior); consider periodic cleanup of the temp dir.
    fd, wav_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)
    jason_tts.tts_to_file(text=question, file_path=wav_path)
    return question, wav_path

# -----------------------------
# ✅ STEP 4: Record Answer & Store for Analysis
# -----------------------------
def record_answer(audio_file):
    """Transcribe the candidate's recorded answer and advance to the next question.

    Returns (status_message, next_question_text); feedback is filled in later
    by analyze_all().
    """
    global current_question_index
    if not interview_questions:
        return "❌ Generate questions first.", ""
    if audio_file is None:
        return "❌ Please record your answer.", ""

    try:
        transcript = asr_model.transcribe(audio_file)["text"]
        feedback_summary.append(
            {
                "question": interview_questions[current_question_index],
                "answer": transcript,
                "feedback": None,
            }
        )
        current_question_index += 1
        if current_question_index < len(interview_questions):
            next_q = interview_questions[current_question_index]
        else:
            next_q = "✅ Interview complete. Click SUBMIT for feedback."
        # current_question_index is now 1-based relative to the answered question.
        return f"✅ Answer recorded for Q{current_question_index}.", next_q
    except Exception as e:
        return f"Error: {str(e)}", ""

# -----------------------------
# ✅ STEP 5: Analyze All Answers via Gemini
# -----------------------------
def analyze_all():
    """Send every stored Q&A pair to Gemini and build one concatenated feedback report."""
    if not feedback_summary:
        return "❌ No answers recorded."

    divider = "-" * 50
    sections = []
    for idx, entry in enumerate(feedback_summary, 1):
        prompt = (
            f"Interview Question: {entry['question']}\n"
            f"Candidate's Answer: {entry['answer']}\n"
            "Evaluate the answer:\n"
            "- Overall Rating: Weak/Average/Good/Excellent\n"
            "- Score: out of 100\n"
            "- Suggestions:"
        )
        try:
            verdict = model.generate_content(prompt).text.strip()
            entry['feedback'] = verdict  # cache Gemini's evaluation on the record
            sections.append(
                f"Q{idx}: {entry['question']}\nAnswer: {entry['answer']}\n{verdict}\n{divider}\n"
            )
        except Exception as e:
            sections.append(
                f"Q{idx}: {entry['question']}\nAnswer: {entry['answer']}\nError: {str(e)}\n{divider}\n"
            )
    return "".join(sections)

# -----------------------------
# ✅ GRADIO UI (Modern Look)
# -----------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
    gr.Markdown("""
    <div style='text-align: center; font-size: 28px; font-weight: bold;'>🎙️ AI Mock Interview Bot</div>
    <div style='text-align: center; font-size: 16px;'>Choose your field and get started. Jason will guide you through your interview.</div>
    """)

    # --- Candidate info row ---
    with gr.Row():
        name = gr.Textbox(label="👤 Your Name")
        # BUG FIX: this Textbox was previously named `field`, shadowing the
        # module-level `field` string that generate_questions() reads. If the
        # user clicked "Generate Questions" before "Begin Interview", the
        # Gemini prompt embedded the Textbox object's repr instead of the
        # chosen field. Renamed to `field_box` so the global is untouched.
        field_box = gr.Textbox(label="🎯 Interview Field (e.g. Software, Marketing, HR)")
        start_btn = gr.Button("🚀 Begin Interview")
    greet = gr.Textbox(label="Welcome Message", interactive=False)
    start_btn.click(save_user_info, inputs=[name, field_box], outputs=greet)

    # --- Question generation row ---
    with gr.Row():
        gen_btn = gr.Button("📄 Generate Questions")
        status = gr.Textbox(label="Status")
        current_q = gr.Textbox(label="📌 Current Question", lines=2)
    gen_btn.click(generate_questions, outputs=[status, current_q])

    # --- Jason speaks the current question ---
    speak = gr.Button("🔊 Hear Question", elem_classes="speak-btn")
    audio = gr.Audio(label="🔈 Jason's Voice")
    speak.click(fn=speak_current_question, outputs=[current_q, audio])

    # --- Record & submit one answer; current_q is updated with the next question ---
    record = gr.Audio(sources=["microphone"], type="filepath", label="🎤 Record Your Answer")
    submit_ans = gr.Button("✅ Submit This Answer")
    rec_status = gr.Textbox(label="Answer Status")
    # NOTE(review): this Textbox is never wired to any event (record_answer's
    # second output goes to current_q above). Kept for UI parity — remove or
    # wire it up deliberately.
    next_q = gr.Textbox(label="Next Question")
    submit_ans.click(fn=record_answer, inputs=record, outputs=[rec_status, current_q])

    # --- Final analysis over all recorded answers ---
    submit_all = gr.Button("📊 Submit All & Get Feedback")
    analysis = gr.Textbox(label="📋 Final Feedback Summary", lines=20)
    submit_all.click(fn=analyze_all, outputs=analysis)

# -----------------------------
# ✅ RUN APP
# -----------------------------
if __name__ == "__main__":
    demo.launch()