Spaces:
Sleeping
Sleeping
Upload 4 files
Browse files- .gitattributes +1 -0
- Interviewbot_poc_image.png +3 -0
- app.py +58 -0
- interview_assistant.py +110 -0
- requirements.txt +4 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Interviewbot_poc_image.png filter=lfs diff=lfs merge=lfs -text

Interviewbot_poc_image.png
ADDED
Git LFS Details
app.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import re

import streamlit as st

from interview_assistant import InterviewAssistant

# TalentScout screening chat UI.
#
# Streamlit re-executes this whole script top-to-bottom after every user
# interaction, so anything that must survive across reruns lives in
# st.session_state.

# Create the assistant once per session instead of on every rerun
# (each construction re-creates the underlying LLM client).
if "interview_assistant" not in st.session_state:
    st.session_state.interview_assistant = InterviewAssistant()
interview_assistant = st.session_state.interview_assistant

# Streamlit app header
st.header("TalentScout Interview Assistant")

# Chat history shown to the user and sent to the model as context.
if "messages" not in st.session_state:
    st.session_state.messages = []

# The one-time GDPR/consent message from the assistant.
if "initial_message" not in st.session_state:
    st.session_state.initial_message = []

# Display the consent message. It is recorded in session state only the
# first time: the previous version appended it on every rerun, so the
# context passed to the model accumulated duplicate copies.
with st.chat_message("assistant"):
    msg_to_candidate = (
        "To assist you better, we kindly request your name, email, and phone number. "
        "Your information will be handled securely and will not be shared with anyone. "
        "We follow all GDPR guidelines to protect your privacy. Do you agree to share this information?"
    )
    st.markdown(msg_to_candidate)
    if not st.session_state.initial_message:
        st.session_state.initial_message.append(
            {"role": "assistant", "content": msg_to_candidate}
        )

# Replay the chat history.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle user input.
if prompt := st.chat_input("What is up?"):
    # Display the user's message and record it in the history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Ask the assistant for the next reply, passing the consent message
    # and the full chat history as context.
    response = interview_assistant.interview_process_by_assistant(
        prompt, [st.session_state.initial_message, st.session_state.messages]
    )

    # Extract the structured candidate summary (a JSON object starting
    # with "full_name") if the model has produced one.
    pattern = r'(\{\s*"full_name":.*\})'
    match = re.search(pattern, response, re.DOTALL)

    if match:
        extracted_json = match.group(1)  # JSON string with the candidate's details
        # TODO(review): persist this summary (DB/file) instead of printing it.
        print(extracted_json)

    # Display the assistant's response and record it in the history.
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
interview_assistant.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import logging
from typing import List, Dict

from dotenv import load_dotenv
from langchain_mistralai import ChatMistralAI
from langchain_core.prompts import ChatPromptTemplate

# Pull environment variables (e.g. MISTRAL_API_KEY) in from a local .env file.
load_dotenv()

# Module-wide logging setup: timestamped, INFO-level messages.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
class InterviewAssistant:
    """
    Handles the initial candidate-screening conversation for TalentScout
    using a conversational AI model.

    Attributes:
        llm (ChatMistralAI): The language model used for generating responses.
    """

    def __init__(self):
        """Initialize the assistant by configuring the language model."""
        logging.info("Initializing InterviewAssistant...")
        api_key = os.getenv("MISTRAL_API_KEY")
        if not api_key:
            # Warn early: ChatMistralAI would otherwise raise a confusing
            # authentication error only on the first request.
            logging.warning("MISTRAL_API_KEY is not set; model requests will fail.")
        self.llm = ChatMistralAI(
            model="mistral-large-latest",
            api_key=api_key,            # API key loaded from environment variables
            temperature=0.1,            # low temperature keeps the screening flow consistent
            max_retries=2,              # number of retries for failed requests
        )
        logging.info("InterviewAssistant initialized successfully.")

    def interview_process_by_assistant(self, question: str, history: List[Dict[str, str]]) -> str:
        """
        Generate the assistant's next reply in the screening conversation.

        Args:
            question (str): The candidate's latest input.
            history (List[Dict[str, str]]): The conversation history.
                NOTE(review): the Streamlit caller actually passes a list of
                two message lists, not a flat list of dicts — the annotation
                is kept for backward compatibility but should be confirmed.

        Returns:
            str: The assistant's response, or a generic error message if the
            model call fails.
        """
        logging.info("Generating response for the candidate...")

        # System instructions for the screening flow. Literal braces in the
        # JSON example are doubled ({{ }}) so ChatPromptTemplate does not
        # treat them as template variables.
        prompt = ChatPromptTemplate.from_messages([
            (
                "system",
                """You are the HR assistant for TalentScout and you handle the initial screening. Your task is to gather information from the candidate one item at a time: first show the candidate the list of details that will be asked, then ask for them one by one. One of the items is the tech stack; based on it, ask 3 or 4 technical questions, one at a time. Analyze the conversation history to keep the flow with the candidate correct. Once the initial screening is completely done, close with a greeting telling the candidate we will contact them through their email. If the candidate afterwards sends a closing greeting such as "Thank you", respond to it.

                Information you need to gather:
                Full Name
                Email Address
                Phone Number
                Years of Experience
                Desired Position(s)
                Current Location
                Tech Stack

                Return format should look like this:
                {{
                    "full_name": "string",
                    "email": "string",
                    "phone_number": "string",
                    "years_of_experience": "string",
                    "desired_position": "string",
                    "current_location": "string",
                    "tech_stack": "string",
                    "technical_questions_and_answers": [{{"question1": "answer1", "question2": "answer2", "question3": "answer3"}}]
                }}

                Example for technical questions based on the tech stack:
                example tech stack: programming languages, frameworks, databases, and tools the candidate is proficient in.
                You need to ask technical questions about these skills one by one.

                Things to remember:
                Analyze the history to keep the flow with the candidate correct.
                At the start, show what information you need, then ask for each item one by one.
                Provide meaningful responses; never emit placeholder strings like "[Provide your answer or name...]".
                Once the whole process is completed, give a closing greeting.
                We follow all GDPR guidelines to protect the candidate's privacy, so if the candidate is not interested in sharing this information, give a closing greeting.
                """,
            ),
            ("human", "{candidate_input} {history}"),
        ])

        # Compose the prompt with the language model.
        chain = prompt | self.llm

        try:
            # Invoke the chain with the candidate's input and conversation history.
            llm_response = chain.invoke(
                {
                    "candidate_input": question,
                    "history": history,
                }
            )
            logging.info("Response generated successfully.")
            return llm_response.content
        except Exception:
            # Never propagate provider errors to the UI: log the traceback
            # and return a user-facing fallback message instead.
            logging.exception("Error generating response")
            return "An error occurred while processing your request. Please try again."
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
streamlit
python-dotenv
langchain_core
langchain_mistralai