# --- IMPORTS ---
import gradio as gr
from huggingface_hub import InferenceClient
import re
import random
import whisper
from pydub import AudioSegment
import torch
import torchvision.transforms as transforms
import torchvision.models as models
from PIL import Image
# --- LOAD MODELS ---
# HuggingFace Zephyr Model for Chat
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Whisper Model for Audio-to-Text
whisper_model = whisper.load_model("base")
# ResNet18 Model for Posture Classification
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
model.fc = torch.nn.Linear(model.fc.in_features, 2)
model.eval()
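# NOTE: the 2-class head above is freshly initialized with random weights, so
# predictions are meaningless until fine-tuned weights are loaded. A minimal
# sketch, assuming a checkpoint trained elsewhere (hypothetical filename):
# model.load_state_dict(torch.load("posture_resnet18.pt", map_location="cpu"))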
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])
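# Pretrained ResNets are normally fed ImageNet-normalized tensors; if the
# posture head gets fine-tuned with normalization, mirror it here (these are
# the standard ImageNet statistics, not something this app currently applies):
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])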
# --- LOAD QUESTIONS ---
def load_questions(file_path):
    with open(file_path, 'r') as f:
        data = f.read()
    question_blocks = re.split(r'Question:\s*', data)[1:]
    questions = []
    for block in question_blocks:
        parts = block.split('Possible Answers:')
        question_text = parts[0].strip()
        answers_text = parts[1].strip()
        possible_answers = [ans.strip() for ans in re.split(r'\d+\.\s+', answers_text) if ans.strip()]
        questions.append({'question': question_text, 'answers': possible_answers})
    return questions

all_questions = load_questions('knowledge.txt')
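# Illustrative knowledge.txt layout, inferred from the regexes in
# load_questions() above (sample content, not taken from the real file):
#
# Question: What is a hash table?
# Possible Answers:
# 1. A structure mapping keys to values via a hash function
# 2. Average O(1) lookup and insertion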
questions_by_type = {
    'Technical': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        'function', 'linked list', 'url', 'rest', 'graphql', 'garbage', 'cap theorem', 'sql', 'hash table',
        'stack', 'queue', 'recursion', 'reverse', 'bfs', 'dfs', 'time complexity', 'binary search tree',
        'web application', 'chat system', 'load balancing', 'caching', 'normalization', 'acid', 'indexing',
        'sql injection', 'https', 'xss', 'hash', 'vulnerabilities'])],
    'Competency-Based Interview': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        "debugging", "learning", "deadlines", "teamwork", "leadership", "mistake", "conflict", "decision"])],
    'Case': [q for q in all_questions if any(keyword in q['question'].lower() for keyword in [
        "testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"])]
}

# --- AUDIO TRANSCRIPTION ---
def transcribe_audio(file_path):
    try:
        # pydub needs ffmpeg on the PATH to decode most audio formats
        audio = AudioSegment.from_file(file_path)
        converted_path = "converted.wav"
        audio.export(converted_path, format="wav")
        result = whisper_model.transcribe(converted_path, fp16=False)
        return result["text"]
    except Exception as e:
        return f"❌ ERROR: {str(e)}"

# --- POSTURE CLASSIFICATION ---
def classify_image(image):
    if image is None:
        return "No image provided! Please upload or capture an image."
    image = transform(image).unsqueeze(0)
    with torch.no_grad():  # inference only, no gradients needed
        output = model(image)
    _, predicted = torch.max(output, 1)
    # Class 0 is assumed to mean "good posture"
    return (
        "✅ Good Posture! Sit exactly like that for your interview!"
        if predicted.item() == 0
        else "⚠️ Bad Posture! Try sitting straighter or more centered for your real interview."
    )

# --- INTERVIEW LOGIC ---
def set_type(choice, user_profile):
    user_profile["interview_type"] = choice
    return "Great! What's your background and what field/role are you aiming for?", user_profile

def save_background(info, user_profile):
    user_profile["field"] = info
    return "Awesome! Type 'start' below to begin your interview.", user_profile

def respond(message, chat_history, user_profile):
    message_lower = message.strip().lower()
    if not user_profile.get("interview_type") or not user_profile.get("field"):
        bot_msg = "Please finish steps 1 and 2 before starting the interview."
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'start':
        interview_type = user_profile['interview_type']
        # Copy before shuffling so the shared category list is not reordered in place
        selected_questions = list(questions_by_type.get(interview_type, []))
        if not selected_questions:
            chat_history.append((message, "No questions are available for this interview type."))
            return chat_history
        random.shuffle(selected_questions)
        selected_questions = selected_questions[:10]
        user_profile['questions'] = selected_questions
        user_profile['current_q'] = 0
        user_profile['user_answers'] = []
        user_profile['interview_in_progress'] = True
        intro = f"Welcome to your {interview_type} interview for a {user_profile['field']} position. I will ask you up to 10 questions. Type 'stop' anytime to end."
        first_q = f"First question: {selected_questions[0]['question']}"
        chat_history.append((message, intro))
        chat_history.append(("", first_q))
        return chat_history
    if message_lower == 'stop' and user_profile.get("interview_in_progress"):
        user_profile['interview_in_progress'] = False
        bot_msg = "Interview stopped. Type 'feedback' if you'd like me to analyze your answers. Thanks for interviewing with Intervu!"
        chat_history.append((message, bot_msg))
        return chat_history
    if user_profile.get("interview_in_progress"):
        q_index = user_profile['current_q']
        user_profile['user_answers'].append(message)
        q_index += 1
        user_profile['current_q'] = q_index
        if q_index < len(user_profile['questions']):
            bot_msg = f"Next question: {user_profile['questions'][q_index]['question']}"
        else:
            user_profile['interview_in_progress'] = False
            bot_msg = "Interview complete! Type 'feedback' if you'd like me to analyze your answers. Thanks for interviewing with Intervu!"
        chat_history.append((message, bot_msg))
        return chat_history
    if message_lower == 'feedback':
        feedback = generate_feedback(user_profile)
        chat_history.append((message, feedback))
        return chat_history
    # Anything else falls through to the Zephyr model for free-form conversation
    messages = [{"role": "system", "content": f"You are a professional interviewer conducting a {user_profile['interview_type']} interview for a candidate in {user_profile['field']}."}]
    for q, a in chat_history:
        if q:  # skip the empty user turn used to push the first question
            messages.append({"role": "user", "content": q})
        messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=150, stream=False)
    bot_msg = response.choices[0].message.content
    chat_history.append((message, bot_msg))
    return chat_history

def generate_feedback(user_profile):
    feedback = []
    questions = user_profile.get('questions', [])
    answers = user_profile.get('user_answers', [])
    for i, user_ans in enumerate(answers):
        correct_answers = questions[i]['answers']
        # Naive keyword check: did the reply contain any expected answer phrase?
        match = any(ans.lower() in user_ans.lower() for ans in correct_answers)
        if match:
            fb = f"Question {i+1}: ✅ Good job!"
        else:
            fb = f"Question {i+1}: ❌ Missed key points: {correct_answers[0]}"
        feedback.append(fb)
    return "\n".join(feedback)

# --- AUDIO HANDLING ---
def handle_audio(audio_file, chat_history, user_profile):
    transcribed = transcribe_audio(audio_file)
    if transcribed.startswith("❌"):
        chat_history.append(("Audio input", transcribed))
        return chat_history
    return respond(transcribed, chat_history, user_profile)

# --- GRADIO INTERFACE ---
with gr.Blocks() as demo:
    user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
    chat_history = gr.State([])

    gr.Markdown("# 🤖 Welcome to Intervu")
    gr.Image(value="images.JPEG", show_label=False, width=200)

    gr.Markdown("### Step 1: Choose Interview Type")
    with gr.Row():
        with gr.Column():
            btn1 = gr.Button("Technical")
            btn2 = gr.Button("Competency-Based Interview")
            btn3 = gr.Button("Case")
    type_output = gr.Textbox(label="Bot response", interactive=False)
    # Hidden textboxes carry each button's fixed choice string into set_type
    btn1.click(set_type, inputs=[gr.Textbox(value="Technical", visible=False), user_profile], outputs=[type_output, user_profile])
    btn2.click(set_type, inputs=[gr.Textbox(value="Competency-Based Interview", visible=False), user_profile], outputs=[type_output, user_profile])
    btn3.click(set_type, inputs=[gr.Textbox(value="Case", visible=False), user_profile], outputs=[type_output, user_profile])

    gr.Markdown("### Step 2: Enter Your Background")
    background = gr.Textbox(label="Your background and field/goal")
    background_btn = gr.Button("Submit")
    background_output = gr.Textbox(label="Bot response", interactive=False)
    background_btn.click(save_background, inputs=[background, user_profile], outputs=[background_output, user_profile])

    gr.Markdown("### Step 3: Start Interview")
    # Tuple-format history, matching the (user, bot) pairs built in respond()
    chatbot = gr.Chatbot(label="Interview Bot")
    with gr.Row():
        msg = gr.Textbox(label="Your message")
        audio_input = gr.Audio(type="filepath", label="🎙️ Upload or Record your answer")
    with gr.Row():
        send_btn = gr.Button("Send Text")
        audio_btn = gr.Button("Send Audio")
    send_btn.click(respond, inputs=[msg, chat_history, user_profile], outputs=[chatbot], queue=False)
    send_btn.click(lambda: "", None, msg, queue=False)  # clear the textbox after sending
    audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot], queue=False)

    # ✅ Step 4: Webcam Posture Check
    gr.Markdown("### Step 4: Webcam Posture Check")
    # sources= is the Gradio 4 spelling of source=; type="pil" so classify_image receives a PIL image
    webcam = gr.Image(sources=["webcam"], type="pil", label="Capture Posture")
    posture_output = gr.Textbox(label="Posture Feedback")
    posture_btn = gr.Button("Analyze Posture")
    posture_btn.click(classify_image, inputs=[webcam], outputs=[posture_output])

# --- LAUNCH ---
demo.launch()