Spaces:
Runtime error
Runtime error
Upload 5 files
Browse files- app.py +18 -0
- bio.py +84 -0
- display.py +19 -0
- evaluate.py +63 -0
- round1.py +125 -0
app.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
import bio
import round1
import display

# Define pages here: sidebar label -> module exposing an app() entry point.
PAGES = {
    "Candidate Bio": bio,
    "Interview Round 1": round1,
    "Display": display,
    # Add more pages as needed
}

st.sidebar.title('Main Menu')
selection = st.sidebar.radio("Go to", list(PAGES.keys()))

# Dispatch straight to the selected page's entry point.
PAGES[selection].app()
|
bio.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import shelve
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import re # For regex operations
|
| 4 |
+
|
| 5 |
+
def validate_email(email):
    """Return True if *email* is a syntactically plausible address.

    Pragmatic local@domain.tld check; not full RFC 5322 validation.
    """
    pattern = r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}"
    # fullmatch instead of an anchored re.match: with "$", re.match would
    # still accept a trailing newline (e.g. "a@b.co\n" pasted from a file).
    return re.fullmatch(pattern, email) is not None
|
| 9 |
+
|
| 10 |
+
def validate_mobile(mobile_number):
    """Return True for a valid 10-digit Indian mobile number (leading 6-9)."""
    pattern = r"[6-9]\d{9}"
    # fullmatch rejects any trailing characters, including the trailing
    # newline that "$" with re.match would silently allow through.
    return re.fullmatch(pattern, mobile_number) is not None
|
| 14 |
+
|
| 15 |
+
def analyze_text(text):
    """Placeholder function for analyzing text to extract key points. In practice, use NLP techniques."""
    # Demo implementation: whitespace-split and keep at most five tokens.
    tokens = text.split()
    if len(tokens) > 5:
        return ", ".join(tokens[:5]) + "..."
    return ", ".join(tokens)
|
| 20 |
+
|
| 21 |
+
def app():
    """Render the candidate bio / interview details form and persist it.

    On submit, validates the email and mobile number, stores the whole
    form under the "candidate_bio" key of the shared "datastore" shelve
    file (read later by round1.py and display.py), then echoes a summary.
    """
    st.title("Candidate Bio and Interview Details Form")

    # Candidate Bio Section
    st.header("Candidate Bio")
    name = st.text_input("Name")
    email = st.text_input("Email")
    mobile_number = st.text_input("Mobile Number")
    years_of_experience = st.number_input("Years of Experience", min_value=0, format="%d")
    past_companies = st.text_area("Past Companies Worked At (separate by commas)")
    past_roles = st.text_area("Past Roles and Responsibilities")

    # Interview Details Section
    st.header("Interview Details")
    company_name = st.text_input("Company Name")
    interviewing_department = st.text_input("Interviewing Department")
    role_applied_for = st.text_input("Role Applied For")
    job_description = st.text_area("Job Description")
    interview_type = st.selectbox("Type of Interview", ["Technical", "HR", "Case Study", "Other"])
    focus_areas = st.text_area("Key Areas for Preparation")

    if st.button("Submit"):
        # Validation
        # NOTE(review): only email and mobile are validated; name and the
        # other free-text fields may be empty -- confirm that is intended.
        if not validate_email(email):
            st.error("Please enter a valid email address.")
            return
        if not validate_mobile(mobile_number):
            st.error("Please enter a valid mobile number.")
            return

        # Persist the form as a single record; other pages read this key.
        with shelve.open("datastore") as db:
            db["candidate_bio"] = {
                "name": name,
                "email": email,
                "mobile_number": mobile_number,
                "years_of_experience": years_of_experience,
                "past_companies": past_companies,
                "past_roles": past_roles,
                "company_name": company_name,
                "interviewing_department": interviewing_department,
                "role_applied_for": role_applied_for,
                "job_description": job_description,
                "interview_type": interview_type,
                "focus_areas": focus_areas
            }

        st.success("Form Submitted Successfully!")
        st.write("## Summary")
        st.write(f"Name: {name}")
        st.write(f"Email: {email}")
        st.write(f"Mobile Number: {mobile_number}")
        st.write(f"Years of Experience: {years_of_experience}")
        st.write(f"Past Companies: {past_companies}")
        # analyze_text truncates long free text to the first few words.
        st.write(f"Past Roles: {analyze_text(past_roles)}")
        st.write(f"Company Name: {company_name}")
        st.write(f"Interviewing Department: {interviewing_department}")
        st.write(f"Role Applied For: {role_applied_for}")
        st.write(f"Job Description: {analyze_text(job_description)}")
        st.write(f"Type of Interview: {interview_type}")
        st.write(f"Key Areas for Preparation: {focus_areas}")

if __name__ == "__main__":
    app()
|
display.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import shelve
|
| 3 |
+
|
| 4 |
+
def app():
    """Read-only page showing whatever the bio form saved to the shelve store."""
    st.title("Display Candidate Bio and Interview Details")

    # Use shelve to read saved data
    with shelve.open("datastore") as db:
        candidate_bio = db.get("candidate_bio", {})  # Default to empty dict if not found

    # Guard clause: nothing saved yet -> tell the user and stop.
    if not candidate_bio:
        st.error("No data found. Please ensure the bio form has been submitted.")
        return

    st.write("## Candidate Bio and Interview Details")
    for field, stored in candidate_bio.items():
        st.text(f"{field.replace('_', ' ').capitalize()}: {stored}")

if __name__ == "__main__":
    app()
|
evaluate.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import google.generativeai as genai
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
# Load responses from a file
|
| 6 |
+
def load_responses(filename="interview_responses_round_1.csv"):
    """Load saved round-1 interview responses into a DataFrame.

    Terminates the process with a non-zero status when the file is
    missing, since evaluation cannot proceed without it.
    """
    try:
        return pd.read_csv(filename)
    except FileNotFoundError:
        print("Response file not found. Please ensure that 'round1.py' has been run and responses have been saved.")
        # exit() would terminate with status 0 (success); signal the
        # failure explicitly so callers/CI can detect it.
        raise SystemExit(1)
|
| 12 |
+
|
| 13 |
+
# Configure and initialize Gemini
|
| 14 |
+
def configure_gemini():
    """Configure the Gemini client from the environment and build a model.

    Reads GOOGLE_GENERATIVE_AI_API_KEY; terminates with a non-zero status
    when the key is absent. Returns a ready-to-use GenerativeModel.
    """
    api_key = os.getenv("GOOGLE_GENERATIVE_AI_API_KEY")
    if not api_key:
        print("Google Generative AI API key not set. Please set your API key as an environment variable.")
        # exit() reports success (status 0); raise a failure code instead.
        raise SystemExit(1)

    genai.configure(api_key=api_key)

    # Sampling parameters: fairly creative output, capped at 512 tokens.
    generation_config = {
        "temperature": 0.9,
        "top_p": 1,
        "top_k": 50,
        "max_output_tokens": 512,
    }

    # Block medium-or-worse content across all four harm categories.
    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    ]

    return genai.GenerativeModel(model_name="gemini-1.0-pro",
                                 generation_config=generation_config,
                                 safety_settings=safety_settings)
|
| 39 |
+
|
| 40 |
+
def evaluate_responses(model, responses):
    """Run every stored answer through *model* and collect its feedback.

    A failure on one row records a placeholder instead of aborting the run.
    """
    outcomes = []
    for position, record in responses.iterrows():
        try:
            outcomes.append(model.generate_content([record['response']]).text)
        except Exception as err:
            print(f"An error occurred while evaluating response {position+1}: {err}")
            outcomes.append("Error during evaluation.")
    return outcomes
|
| 51 |
+
|
| 52 |
+
def print_evaluations(responses, evaluations):
    """Echo each response alongside the model's evaluation, 1-indexed."""
    paired = zip(responses['response'], evaluations)
    for number, (answer, verdict) in enumerate(paired, start=1):
        print(f"Response {number}: {answer}")
        print(f"Evaluation: {verdict}")
        print("----------")
|
| 57 |
+
|
| 58 |
+
if __name__ == "__main__":
    # Entry point: load saved round-1 answers and, if any exist,
    # score and print them.
    responses = load_responses()
    if not responses.empty:
        gemini = configure_gemini()
        print_evaluations(responses, evaluate_responses(gemini, responses))
|
round1.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
import streamlit as st
import shelve
import os
# Import Google's Generative AI package
import google.generativeai as genai

# SECURITY: an API key was previously hard-coded here and committed to
# source control -- that key must be revoked/rotated. Read the key from
# the environment instead (same variable evaluate.py uses).
genai.configure(api_key=os.getenv("GOOGLE_GENERATIVE_AI_API_KEY", ""))
|
| 9 |
+
|
| 10 |
+
def generate_questions(company_name, role_applied_for, job_description, interview_type, focus_areas):
    """Ask Gemini for interview questions tailored to the given role.

    Returns the model's raw text, requested as a bulleted list so that
    callers can split it on newlines.
    """
    sampling = {
        "temperature": 0.9,
        "top_p": 1,
        "top_k": 1,
        "max_output_tokens": 2048,
    }

    blocking = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    ]

    gemini = genai.GenerativeModel(model_name="gemini-1.0-pro",
                                   generation_config=sampling,
                                   safety_settings=blocking)

    # One newline-joined prompt; the explicit "bulleted list" instruction
    # keeps the reply easy to split downstream.
    prompt = (
        "Based on the following details, generate a list of interview questions:\n"
        f"Company Name: {company_name}\n"
        f"Role Applied For: {role_applied_for}\n"
        f"Job Description: {job_description}\n"
        f"Interview Type: {interview_type}\n"
        f"Focus Areas: {focus_areas}\n"
        "Format the questions as a bulleted list."
    )

    return gemini.generate_content(prompt).text
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def parse_response_text(response_text):
    """Manually parse a structured response into a nested dictionary.

    Expected (loose) format::

        Section Name: {
        "Question 1": "text",
        ...

    Returns {section: {question_label: question_text}}. Lines matching
    neither pattern are ignored. The format is model-dependent and may
    need adjustments based on actual response variations.
    """
    questions_dict = {}
    current_section = ""
    for line in response_text.split("\n"):
        stripped = line.strip()
        if stripped.endswith(": {"):
            # Section header, e.g. 'Technical Questions: {'
            current_section = stripped.split(":")[0].strip()
            questions_dict[current_section] = {}
        elif stripped.startswith('"Question'):
            # maxsplit=1: a colon inside the question text (e.g.
            # "What: ...") previously raised "too many values to unpack".
            question_number, question_text = stripped.split(":", 1)
            # Trim whitespace, the trailing comma, then surrounding quotes.
            question_text = question_text.strip().rstrip(",").strip('"')
            # setdefault guards against a question appearing before any
            # section header (previously a KeyError on "").
            questions_dict.setdefault(current_section, {})[question_number.strip('"')] = question_text
    return questions_dict
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def app():
    """Round-1 page: show the saved bio, generate questions, collect answers.

    Reads the "candidate_bio" record saved by bio.py from the shared
    "datastore" shelve file; shows nothing if the form was never submitted.
    """
    st.title("Interview Round 1 Preparation")

    # Load saved data from bio.py
    with shelve.open("datastore") as db:
        candidate_bio = db.get("candidate_bio", {})

    if candidate_bio:
        # Displaying loaded information for verification
        st.write("## Loaded Information for Interview Preparation")
        # Extract necessary details ("Not specified" when a field is absent)
        company_name = candidate_bio.get("company_name", "Not specified")
        interviewing_department = candidate_bio.get("interviewing_department", "Not specified")
        role_applied_for = candidate_bio.get("role_applied_for", "Not specified")
        job_description = candidate_bio.get("job_description", "Not specified")
        interview_type = candidate_bio.get("interview_type", "Not specified")
        focus_areas = candidate_bio.get("focus_areas", "Not specified")

        # Display the loaded information
        st.write(f"**Company Name:** {company_name}")
        st.write(f"**Interviewing Department:** {interviewing_department}")
        st.write(f"**Role Applied For:** {role_applied_for}")
        st.write(f"**Job Description:** {job_description}")
        st.write(f"**Type of Interview:** {interview_type}")
        st.write(f"**Focus Areas for Preparation:** {focus_areas}")

        # Generate and display questions
        # NOTE(review): Streamlit reruns the whole script on each widget
        # interaction, so this nested-button flow loses state -- clicking
        # "Submit Responses" reruns the script with the outer button back
        # to False and the answers are gone. Consider st.session_state or
        # st.form to make the flow survive reruns.
        if st.button("Generate Interview Questions"):
            questions_text = generate_questions(company_name, role_applied_for, job_description, interview_type, focus_areas)
            questions_list = questions_text.split('\n')  # Splitting the generated text into a list of questions

            # Initialize a place to store responses
            responses = {}
            for i, question in enumerate(questions_list, start=1):
                st.write(f"**Question {i}:** {question}")
                # Capture the user's response
                response = st.text_input(f"Your answer to Question {i}", key=f"response_{i}")
                responses[f"Question {i}"] = response

            # Optional: Display captured responses
            if st.button("Submit Responses"):
                st.write("## Your Responses")
                for question, response in responses.items():
                    st.write(f"{question}: {response}")

if __name__ == "__main__":
    app()
|