Hugging Face Space (status: Runtime error) — commit "Laura's UI changes" — diff of app.py (CHANGED)
|
@@ -1,5 +1,6 @@
|
|
| 1 |
import os
|
| 2 |
import urllib.request
|
|
|
|
| 3 |
from time import sleep
|
| 4 |
from typing import Dict, List, Generator
|
| 5 |
|
|
@@ -9,44 +10,68 @@ from dotenv import load_dotenv
|
|
| 9 |
|
| 10 |
load_dotenv()
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
class MockInterviewer:
|
| 13 |
|
| 14 |
def __init__(self) -> None:
|
| 15 |
self._client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
|
| 16 |
-
self._assistant_id_cache: Dict[
|
| 17 |
self.clear_thread()
|
| 18 |
|
| 19 |
-
def chat_with_text(
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
with open(audio, 'rb') as audio_file:
|
| 24 |
transcriptions = self._client.audio.transcriptions.create(
|
| 25 |
model='whisper-1',
|
| 26 |
file=audio_file,
|
| 27 |
)
|
| 28 |
os.remove(audio)
|
| 29 |
-
|
|
|
|
| 30 |
return [(transcriptions.text, response)]
|
| 31 |
|
| 32 |
def clear_thread(self) -> None:
|
| 33 |
print('Initializing new thread')
|
| 34 |
self._thread = self._client.beta.threads.create()
|
| 35 |
|
| 36 |
-
def _chat(self, message: str,
|
| 37 |
print('Started chat')
|
| 38 |
-
self.
|
| 39 |
-
assistant_id = self._init_assistant(job_role, company)
|
| 40 |
return self._send_message(message, assistant_id)
|
| 41 |
|
| 42 |
-
def _validate_fields(self, job_role: str, company: str) -> None:
|
| 43 |
-
if not job_role and not company:
|
| 44 |
-
raise gr.Error('Job Role and Company are required fields.')
|
| 45 |
-
if not job_role:
|
| 46 |
-
raise gr.Error('Job Role is a required field.')
|
| 47 |
-
if not company:
|
| 48 |
-
raise gr.Error('Company is a required field.')
|
| 49 |
-
|
| 50 |
def _send_message(self, message: str, assistant_id: str) -> str:
|
| 51 |
self._client.beta.threads.messages.create(thread_id=self._thread.id, role='user', content=message)
|
| 52 |
print('Message created')
|
|
@@ -82,18 +107,18 @@ class MockInterviewer:
|
|
| 82 |
os.remove(filename)
|
| 83 |
return file_ids
|
| 84 |
|
| 85 |
-
def _init_assistant(self,
|
| 86 |
-
cache_key =
|
| 87 |
if cache_key in self._assistant_id_cache:
|
| 88 |
print(f'Fetched from cache for key {cache_key}')
|
| 89 |
return self._assistant_id_cache.get(cache_key)
|
| 90 |
else:
|
| 91 |
print(f'Initializing new assistant for key {cache_key}')
|
| 92 |
-
file_ids = self._create_files(company)
|
| 93 |
|
| 94 |
assistant = self._client.beta.assistants.create(
|
| 95 |
name='Mock Interviewer',
|
| 96 |
-
instructions=
|
| 97 |
model='gpt-4-0125-preview',
|
| 98 |
tools=[
|
| 99 |
{
|
|
@@ -104,35 +129,73 @@ class MockInterviewer:
|
|
| 104 |
|
| 105 |
self._assistant_id_cache[cache_key] = assistant.id
|
| 106 |
return assistant.id
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
|
| 108 |
-
|
| 109 |
-
return f'{job_role.lower()}+{company.lower()}'
|
| 110 |
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
|
|
|
|
| 115 |
with gr.Row():
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 136 |
|
| 137 |
if __name__ == '__main__':
|
| 138 |
-
demo.launch()
|
|
|
|
| 1 |
import os
|
| 2 |
import urllib.request
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
from time import sleep
|
| 5 |
from typing import Dict, List, Generator
|
| 6 |
|
|
|
|
| 10 |
|
| 11 |
load_dotenv()
|
| 12 |
|
| 13 |
+
@dataclass(eq=True, frozen=True)
class Config:
    """Immutable bundle of the user's interview settings.

    frozen=True together with eq=True makes instances hashable, which is
    what lets a Config act as the key of the assistant-id cache.
    """

    job_role: str          # e.g. 'Product Manager'; may be empty
    company: str           # e.g. 'Amazon'; may be empty
    job_description: str   # free-form job-posting text; may be empty
    behavioral_count: int  # number of behavioral questions to ask
    technical_count: int   # number of technical questions to ask
    situational_count: int # number of situational questions to ask
    case_count: int        # number of case-like questions to ask
|
| 22 |
+
|
| 23 |
class MockInterviewer:
|
| 24 |
|
| 25 |
def __init__(self) -> None:
    """Build the OpenAI client, an empty assistant cache, and a fresh thread.

    Requires the OPENAI_API_KEY environment variable to be set (loaded via
    dotenv at module import).
    """
    self._client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
    # Maps a Config to the id of the assistant created for it, so repeated
    # chats with identical settings reuse one assistant instead of
    # re-creating it on every message.
    self._assistant_id_cache: Dict[Config, str] = {}
    self.clear_thread()
|
| 29 |
|
| 30 |
+
def chat_with_text(
    self,
    message: Dict,
    history: List[List],
    job_role: str,
    company: str,
    job_description: str,
    behavioral_count: int,
    technical_count: int,
    situational_count: int,
    case_count: int
) -> Generator:
    """gr.ChatInterface callback: relay a typed message to the assistant.

    `history` is supplied by gr.ChatInterface but is not read here — the
    OpenAI thread object carries the conversation state instead.  Yields a
    single response string.
    """
    config = Config(
        job_role,
        company,
        job_description,
        behavioral_count,
        technical_count,
        situational_count,
        case_count,
    )
    yield self._chat(message, config)
|
| 44 |
+
|
| 45 |
+
def chat_with_audio(
    self,
    audio: str,
    job_role: str,
    company: str,
    job_description: str,
    behavioral_count: int,
    technical_count: int,
    situational_count: int,
    case_count: int
) -> List:
    """Transcribe a recorded answer with Whisper and relay it to the assistant.

    `audio` is the path of the temporary recording written by gr.Audio; it
    is always deleted, even when the transcription call raises.

    Returns a one-element chatbot-history update: [(transcript, response)].
    (Fix: the original annotated the return as `str`, but the value is a
    list of (user, bot) pairs consumed by gr.Chatbot.)
    """
    try:
        with open(audio, 'rb') as audio_file:
            transcriptions = self._client.audio.transcriptions.create(
                model='whisper-1',
                file=audio_file,
            )
    finally:
        # Fix: remove the temp file even if the API call fails; the
        # original only deleted it on the success path.
        os.remove(audio)
    config = Config(job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count)
    response = self._chat(transcriptions.text, config)
    return [(transcriptions.text, response)]
|
| 65 |
|
| 66 |
def clear_thread(self) -> None:
    """Discard all prior context by starting a brand-new OpenAI thread."""
    print('Initializing new thread')
    self._thread = self._client.beta.threads.create()
|
| 69 |
|
| 70 |
+
def _chat(self, message: str, config: Config) -> str:
    """Route one user message through the assistant selected by `config`.

    Looks up (or lazily creates) the assistant for this configuration and
    posts the message on the current thread.
    """
    print('Started chat')
    return self._send_message(message, self._init_assistant(config))
|
| 74 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
def _send_message(self, message: str, assistant_id: str) -> str:
|
| 76 |
self._client.beta.threads.messages.create(thread_id=self._thread.id, role='user', content=message)
|
| 77 |
print('Message created')
|
|
|
|
| 107 |
os.remove(filename)
|
| 108 |
return file_ids
|
| 109 |
|
| 110 |
+
def _init_assistant(self, config: Config) -> str:
|
| 111 |
+
cache_key = config
|
| 112 |
if cache_key in self._assistant_id_cache:
|
| 113 |
print(f'Fetched from cache for key {cache_key}')
|
| 114 |
return self._assistant_id_cache.get(cache_key)
|
| 115 |
else:
|
| 116 |
print(f'Initializing new assistant for key {cache_key}')
|
| 117 |
+
file_ids = self._create_files(config.company)
|
| 118 |
|
| 119 |
assistant = self._client.beta.assistants.create(
|
| 120 |
name='Mock Interviewer',
|
| 121 |
+
instructions=self._generate_assistant_instructions(config),
|
| 122 |
model='gpt-4-0125-preview',
|
| 123 |
tools=[
|
| 124 |
{
|
|
|
|
| 129 |
|
| 130 |
self._assistant_id_cache[cache_key] = assistant.id
|
| 131 |
return assistant.id
|
| 132 |
+
|
| 133 |
+
def _generate_assistant_instructions(self, config: Config) -> str:
|
| 134 |
+
if config.job_role and config.company:
|
| 135 |
+
purpose = f'You are Ami, an AI mock interviewer for {config.job_role} roles at {config.company}.'
|
| 136 |
+
elif config.job_role:
|
| 137 |
+
purpose = f'You are Ami, an AI mock interviewer for {config.job_role} roles.'
|
| 138 |
+
elif config.company:
|
| 139 |
+
purpose = f'You are Ami, an AI mock interviewer for roles at {config.company}.'
|
| 140 |
+
else:
|
| 141 |
+
purpose = 'You are Ami, an AI mock interviewer.'
|
| 142 |
+
|
| 143 |
+
if config.job_description:
|
| 144 |
+
specifics = f'Tailor your questions based on the following job posting: {config.job_description}.'
|
| 145 |
+
else:
|
| 146 |
+
specifics = ''
|
| 147 |
+
|
| 148 |
+
return f"{purpose} Please state your purpose when the candidate sends you the first message. If you have been provided a file, use it as an interview guide. {specifics} Ask {config.behavioral_count} number of behavioral questions, {config.technical_count} number of technical questions, {config.situational_count} number of situational questions, and {config.case_count} number of case-like questions. After the candidate gives a response, evaluate the response of the candidate by addressing the candidate as if you were giving feedback to them (i.e. address them as you). Keep in mind what your company values in candidates. Provide a detailed analysis of the candidate's response based on the question type. Also, rate the response on a scale from 1 to 10, where 1 is inadequate and 10 is exceptional."
|
| 149 |
|
| 150 |
+
# --- Gradio UI --------------------------------------------------------------

mock_interviewer = MockInterviewer()

theme = gr.themes.Soft(
    primary_hue="blue",
    secondary_hue="indigo",
).set(
    body_background_fill='*neutral_100',
    body_background_fill_dark='*background_fill_secondary'
)

with gr.Blocks(theme=theme) as demo:
    with gr.Row():
        # Left column: interview configuration controls.
        with gr.Column(variant='panel', scale=1):
            config_title = gr.Markdown('### Interview Settings')
            with gr.Accordion("Job Information", open=False):
                job_role = gr.Textbox(label='Job Role', placeholder='Product Manager')
                company = gr.Textbox(label='Company', placeholder='Amazon')
                job_description = gr.TextArea(
                    label='Job Description',
                    info='Please copy and paste any relevant job description and information here:',
                    placeholder='Key job responsibilities, basic qualifications, preferred qualifications, about the company, etc.'
                )
            with gr.Accordion("Question Preferences", open=False):
                # Fix: the original had two stray statements here
                # (`label='Question Type and Count'` and `info='...',`)
                # that only bound dead local names — the trailing comma even
                # made the second one a 1-tuple.  They rendered nothing in
                # the UI, so they are removed.
                behavioral_count = gr.Slider(label="Behavioral", maximum=10, value=1, step=1)
                technical_count = gr.Slider(label="Technical", maximum=10, value=1, step=1)
                situational_count = gr.Slider(label="Situational", maximum=10, value=1, step=1)
                case_count = gr.Slider(label="Case", maximum=10, value=1, step=1)

        # Right column: the chat surface plus microphone input.
        with gr.Column(variant='panel', scale=6):
            chat_interface = gr.ChatInterface(
                fn=mock_interviewer.chat_with_text,
                title="Hi! I'm Ami, your AI Mock Interviewer.",
                description='You can begin by clicking record and introducing yourself!',
                additional_inputs=[job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count],
                retry_btn=None,
                undo_btn=None)

            chat_interface.chatbot.height = '45vh'

            # Start a fresh OpenAI thread on page load and on "Clear".
            chat_interface.load(mock_interviewer.clear_thread)
            chat_interface.clear_btn.click(mock_interviewer.clear_thread)

            audio = gr.Audio(sources=['microphone'], type='filepath', editable=False)
            audio.stop_recording(fn=mock_interviewer.chat_with_audio,
                                 inputs=[audio, job_role, company, job_description, behavioral_count, technical_count, situational_count, case_count],
                                 outputs=[chat_interface.chatbot],
                                 api_name=False)

if __name__ == '__main__':
    demo.launch()
|