# NOTE: removed non-code residue ("Spaces:" / "Build error" lines) left over
# from a notebook/export pipeline; they were not valid Python.
# -*- coding: utf-8 -*-
"""Feedaltytics Recruiters Evaluation - FeedView.py

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/10ERbz5c3Zuw50gSoCCDEuqP4wGsvdAaM
"""
import os

from docx import Document
import openai
import gradio as gr

# SECURITY: never commit API keys to source control. The key previously
# hard-coded here is exposed and must be revoked/rotated; read it from the
# environment instead.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# User credentials for the demo login gate.
# NOTE(review): plaintext credentials in source are demo-only; replace with a
# real auth mechanism before any production use.
USER_CREDENTIALS = {"admin": "password123"}  # Replace with desired username/password pairs
def extract_text_from_docx(file):
    """Read a .docx file and return all paragraph text joined by newlines."""
    paragraphs = Document(file).paragraphs
    return '\n'.join(para.text for para in paragraphs)
def extract_criteria_and_questions(job_description):
    """Derive the top 5 attributes for a role plus 2-3 interview questions each.

    Returns the model-generated text, or an error-message string if the
    OpenAI call (or anything else) fails.
    """
    try:
        # Build the conversation up front; system prompt fixes the persona,
        # user prompt carries the job description and the task.
        messages = [
            {
                "role": "system",
                "content": "You are an HR expert tasked with analyzing job descriptions and generating "
                           "key attributes to assess candidates, along with relevant interview questions. Be precise and concise."
            },
            {
                "role": "user",
                "content": (
                    f"Job Description:\n{job_description}\n\n"
                    "Based on this job description, please identify the top 5 key attributes or skills required for this role. "
                    "For each attribute, provide 2-3 suggested interview questions that recruiters can use to assess candidates effectively."
                ),
            },
        ]
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=messages,
            max_tokens=1000,
            temperature=0.0,  # deterministic output for repeatable criteria
            top_p=1.0,
        )
        return completion['choices'][0]['message']['content'].strip()
    except openai.error.OpenAIError as api_err:
        return f"OpenAI API Error: {api_err}"
    except Exception as unexpected:
        return f"Unexpected Error: {unexpected}"
def generate_recruiter_report(candidate_transcript, job_description, benchmark, criteria):
    """Score one candidate transcript against the criteria and benchmark.

    Returns a model-generated evaluation (per-criterion 1-10 scores, a total
    out of 50, and a recommendation), or an error-message string on failure.
    """
    # The user prompt bundles all evaluation context plus the grading rubric
    # so every candidate is scored on the same scale.
    prompt = (
        f"Job Description:\n{job_description}\n\n"
        f"Benchmark Transcript:\n{benchmark}\n\n"
        f"Candidate Transcript:\n{candidate_transcript}\n\n"
        f"Evaluation Criteria:\n{criteria}\n\n"
        "Evaluate the candidate's overall performance against all criteria. Provide a detailed summary of their "
        "strengths, areas for improvement, and an overall score based on the following grading scale:\n"
        "Rate the candidate on a scale of 1-10 for each factor:\n"
        "- **1**: Very poor, far below expectations, minimal skills or understanding.\n"
        "- **2-3**: Below average, some understanding but lacking in key areas.\n"
        "- **4-5**: Average, meets basic expectations with minimal distinction.\n"
        "- **6-7**: Above average, solid performance with minor gaps.\n"
        "- **8-9**: Strong, exceeds expectations with well-supported examples.\n"
        "- **10**: Outstanding, exceptional performance with comprehensive examples.\n"
        "Provide a total score out of 50 and a recommendation based on their suitability for the role."
    )
    try:
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a recruitment specialist evaluating candidates for a role. Use the provided criteria "
                        "and benchmarks to ensure consistency across evaluations. Keep the output professional."
                    ),
                },
                {"role": "user", "content": prompt},
            ],
            max_tokens=1500,
            temperature=0.0,  # deterministic scoring across candidates
            top_p=1.0,
        )
        return completion['choices'][0]['message']['content'].strip()
    except openai.error.OpenAIError as api_err:
        return f"OpenAI API Error: {api_err}"
    except Exception as unexpected:
        return f"Unexpected Error: {unexpected}"
def compare_candidates_for_role(evaluation_reports, job_description):
    """Compare all candidate evaluation reports and pick the best fit.

    Returns a model-generated recommendation with justification, or an
    error-message string on failure.
    """
    prompt = (
        f"Job Description:\n{job_description}\n\n"
        f"Recruiter's Evaluation Reports:\n{evaluation_reports}\n\n"
        "Based on the provided job description and evaluation reports for multiple candidates, "
        "analyze and compare their performance. Identify the candidate who best matches the role requirements, "
        "and provide a summary of why this candidate is the best fit. Highlight any specific attributes or skills "
        "that set them apart from others."
    )
    try:
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are an expert recruiter tasked with analyzing evaluation reports and recommending the best "
                        "candidate for a role. Provide a detailed analysis with clear justification for your choice."
                    ),
                },
                {"role": "user", "content": prompt},
            ],
            max_tokens=1500,
            temperature=0.0,  # deterministic comparison
            top_p=1.0,
        )
        return completion['choices'][0]['message']['content'].strip()
    except openai.error.OpenAIError as api_err:
        return f"OpenAI API Error: {api_err}"
    except Exception as unexpected:
        return f"Unexpected Error: {unexpected}"
def process_transcripts(job_description_file, benchmark_file, candidate_files):
    """Run the full pipeline over a set of candidate transcript files.

    Returns a 4-tuple: (criteria-and-questions text, all reports joined,
    dict of per-candidate reports keyed "candidate_N", comparison report).
    """
    job_description = extract_text_from_docx(job_description_file)
    benchmark = extract_text_from_docx(benchmark_file)
    transcripts = [extract_text_from_docx(path) for path in candidate_files]

    # Step 1: derive the evaluation criteria from the job description.
    criteria_and_questions = extract_criteria_and_questions(job_description)

    # Step 2: keep only the first five blank-line-separated sections as the
    # criteria used for scoring, then evaluate every candidate against them.
    criteria_text = "\n\n".join(criteria_and_questions.split("\n\n")[:5])

    combined = []
    individual_reports = {}
    for idx, transcript in enumerate(transcripts, start=1):
        report = generate_recruiter_report(transcript, job_description, benchmark, criteria_text)
        combined.append(f"--- Candidate {idx} ---\n{report}\n")
        individual_reports[f"candidate_{idx}"] = report

    # Step 3: rank the candidates against one another.
    comparison_report = compare_candidates_for_role("\n".join(combined), job_description)
    return criteria_and_questions, "\n".join(combined), individual_reports, comparison_report
def main():
    """Assemble and launch the Gradio UI.

    Layout: a username/password gate; on successful login the evaluation
    tool (criteria generation + candidate evaluation) becomes visible.
    Component creation order is significant to Gradio's layout and is kept
    as in the original.
    """
    with gr.Blocks(css="""
    #company-logo img {
        width: 150px;
        height: auto;
        margin: 0 auto;
        display: block;
    }
    """) as demo:
        # --- login gate -------------------------------------------------
        login_user = gr.Textbox(label="Username")
        login_pass = gr.Textbox(label="Password", type="password")
        login_button = gr.Button("Login")
        auth_status = gr.Textbox(label="Login Status", interactive=False)

        # --- main tool, hidden until authenticated ----------------------
        with gr.Group(visible=False) as tool_panel:
            gr.Image(
                value="https://drive.google.com/uc?id=1tgTSBVCm6gvg6EGEsgHsjsqyiGefxqHP",
                show_label=False,
                elem_id="company-logo",
            )
            gr.Markdown("### Recruiter's Evaluation Tool")
            mode = gr.Radio(choices=["Generate Criteria", "Evaluate Candidates"], label="Choose Mode")
            jd_upload = gr.File(label="Job Description (.docx)")

            # Criteria-generation output
            criteria_box = gr.Textbox(label="Generated Criteria", lines=15)

            # Candidate-evaluation inputs and outputs
            benchmark_upload = gr.File(label="Benchmark Transcript (.docx)")
            candidate_uploads = gr.File(label="Candidate Transcripts (.docx)", file_count="multiple")
            reports_box = gr.Textbox(label="Evaluation Reports", lines=30)
            best_fit_box = gr.Textbox(label="Best Fit Recommendation", lines=10)

            def on_generate(jd_file):
                # Criteria mode needs only the job description.
                return extract_criteria_and_questions(extract_text_from_docx(jd_file))

            def on_evaluate(jd_file, bench_file, cand_files):
                # Evaluation mode runs the full pipeline; only the joined
                # reports and the comparison are surfaced in the UI.
                _, reports, _, comparison = process_transcripts(jd_file, bench_file, cand_files)
                return reports, comparison

            # Toggle the criteria box depending on the selected mode.
            mode.change(
                lambda choice: gr.update(visible=(choice == "Generate Criteria")),
                inputs=[mode],
                outputs=[criteria_box],
            )

            gr.Button("Generate Criteria").click(
                on_generate,
                inputs=[jd_upload],
                outputs=[criteria_box],
            )

            gr.Button("Evaluate Candidates").click(
                on_evaluate,
                inputs=[jd_upload, benchmark_upload, candidate_uploads],
                outputs=[reports_box, best_fit_box],
            )

        def check_login(user_value, pass_value):
            # Reveal the tool only when the pair matches the credential table.
            if user_value in USER_CREDENTIALS and USER_CREDENTIALS[user_value] == pass_value:
                return gr.update(visible=True), "Login Successful!"
            return gr.update(visible=False), "Login Failed. Please check your credentials."

        login_button.click(
            check_login,
            inputs=[login_user, login_pass],
            outputs=[tool_panel, auth_status],
        )

    demo.launch()
# Script entry point: launch the Gradio app when run directly.
if __name__ == "__main__":
    main()