DreamStream-1's picture
Update app.py
0238247 verified
raw
history blame
6.15 kB
import gradio as gr
import os
import csv
import re
import requests
from sentence_transformers import SentenceTransformer, util
from PyPDF2 import PdfReader # For handling PDF files
# Initialize Sentence-Transformer model
# (MiniLM paraphrase model; used below to embed both the job description
# and each resume for cosine-similarity scoring.)
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Get Google API key from Hugging Face Secrets
# NOTE(review): returns None when the secret is unset — the NLP call in
# extract_entities_via_google will then fail and fall back to placeholders.
google_api_key = os.getenv("GOOGLE_API_KEY") # Get the key from environment variables
# Define a function to extract leadership experience from resume text
def extract_leadership_experience(resume_text):
    """Scan resume text for leadership-related keywords.

    Args:
        resume_text: Raw resume text (any case).

    Returns:
        A comma-separated string of the distinct leadership keywords found,
        in keyword-list order, or "No leadership experience found" when
        none match.
    """
    leadership_keywords = [
        "led", "managed", "team lead", "supervised", "coordinated", "directed",
        "oversaw", "responsible for", "led a team", "executed", "mentored",
        "project manager", "leadership role", "department head", "team captain",
    ]
    resume_text_lower = resume_text.lower()
    # Whole-word match so e.g. "misled" does not count as "led".
    found = [
        keyword
        for keyword in leadership_keywords
        if re.search(r"\b" + re.escape(keyword) + r"\b", resume_text_lower)
    ]
    # BUG FIX: the original joined set(found), so the output order varied
    # with hash randomization across runs. The keyword list is already
    # duplicate-free, so a plain list keeps the result deterministic.
    return ", ".join(found) if found else "No leadership experience found"
# Function to extract name, email, and contact information using Google API
def extract_entities_via_google(resume_text):
    """Call the Google Cloud Natural Language API to pull contact details.

    Args:
        resume_text: Plain resume text to analyze.

    Returns:
        A dict with "name", "email" and "contact" keys; placeholder values
        ("Unknown" / "No Email" / "No Contact") on API failure or when the
        entities are missing.
    """
    endpoint = "https://language.googleapis.com/v1/documents:analyzeEntities"
    # NOTE(review): Google API keys are normally passed as a ?key= query
    # parameter; "Bearer" implies an OAuth access token. Confirm which
    # credential type GOOGLE_API_KEY actually holds.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {google_api_key}",
    }
    payload = {
        "document": {
            "type": "PLAIN_TEXT",
            "content": resume_text,
        }
    }
    fallback = {"name": "Unknown", "email": "No Email", "contact": "No Contact"}
    try:
        # Timeout so a hung request cannot block the whole Gradio app;
        # network errors degrade to the same placeholder result as a
        # non-200 response did in the original.
        response = requests.post(endpoint, json=payload, headers=headers, timeout=30)
    except requests.RequestException:
        return fallback
    if response.status_code != 200:
        return fallback
    entities = response.json().get("entities", [])
    # Email/phone detection is heuristic: it matches on the entity text,
    # not on the API's entity type.
    name = next((e['name'] for e in entities if e['type'] == 'PERSON'), 'Unknown')
    email = next((e['name'] for e in entities if 'email' in e['name'].lower()), 'No Email')
    contact = next((e['name'] for e in entities if 'phone' in e['name'].lower()), 'No Contact')
    return {"name": name, "email": email, "contact": contact}
# Function to extract text from resumes (.txt, .pdf)
def extract_text_from_resume(resume_file):
    """Extract plain text from an uploaded resume file.

    Args:
        resume_file: Uploaded-file object exposing a ``.name`` path
            (as provided by gradio's file components).

    Returns:
        The file's text content, or "" for unsupported extensions or on
        any read/parse error (callers treat "" as "unreadable").
    """
    try:
        filename = resume_file.name
        # Case-insensitive extension check so "RESUME.TXT" also works.
        lowered = filename.lower()
        if lowered.endswith('.txt'):
            # Explicit encoding avoids platform-default decode failures;
            # errors='replace' keeps a partially-decodable file usable.
            with open(filename, 'r', encoding='utf-8', errors='replace') as file:
                return file.read()
        elif lowered.endswith('.pdf'):
            pdf_reader = PdfReader(filename)
            # BUG FIX: extract_text() can return None (e.g. image-only
            # pages); substitute "" so concatenation never raises.
            return "".join(page.extract_text() or "" for page in pdf_reader.pages)
        else:
            return ""
    except Exception:
        # Best-effort by design: any unreadable file is treated as empty
        # text and scored as "Not Eligible" by the caller.
        return ""
# Function to save results to CSV
def save_results_to_csv(results):
    """Write screening results to a CSV file and return its path.

    Args:
        results: Iterable of row tuples matching the header columns.

    Returns:
        The path of the written CSV ("/tmp/resume_results.csv", which is
        overwritten on every call).
    """
    csv_file_path = "/tmp/resume_results.csv"
    # newline='' is required by the csv module to avoid blank rows on
    # Windows; explicit utf-8 keeps non-ASCII candidate names intact.
    with open(csv_file_path, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Resume Name", "Similarity Score (%)", "Eligibility", "Name",
            "Leadership Experience", "Email", "Contact",
        ])
        writer.writerows(results)
    return csv_file_path
# Function to check similarity and process resumes
def check_similarity(job_description, resume_files):
    """Score each resume against the job description and save a CSV.

    Args:
        job_description: Free-text job description.
        resume_files: Uploaded file objects with a ``.name`` path.

    Returns:
        A tuple ``(results, csv_file_path)`` where results is a list of
        row tuples (resume name, similarity %, eligibility, candidate
        name, leadership experience, email, contact) and csv_file_path
        points at the saved CSV of the same rows.
    """
    results = []
    job_emb = model.encode(job_description, convert_to_tensor=True)
    for resume_file in resume_files:
        resume_text = extract_text_from_resume(resume_file)
        if not resume_text:
            # Unreadable or unsupported file: record a zero-score row.
            results.append((resume_file.name, 0, "Not Eligible", None,
                            "No leadership experience", "No Email", "No Contact"))
            continue
        # Cosine similarity between job and resume embeddings, in [0, 100]%.
        resume_emb = model.encode(resume_text, convert_to_tensor=True)
        similarity_score = util.pytorch_cos_sim(job_emb, resume_emb)[0][0].item()
        similarity_percentage = similarity_score * 100
        leadership_experience = extract_leadership_experience(resume_text)
        # Extract name, email, and contact info using Google API.
        contact_info = extract_entities_via_google(resume_text)
        # 50% cosine similarity is the eligibility cut-off. The two
        # original branches differed only in these two fields, so the
        # duplicated append is consolidated here.
        eligible = similarity_score >= 0.50
        results.append((
            resume_file.name,
            similarity_percentage,
            "Eligible" if eligible else "Not Eligible",
            contact_info.get('name', 'Unknown Candidate') if eligible else None,
            leadership_experience,
            contact_info.get('email', 'No Email'),
            contact_info.get('contact', 'No Contact'),
        ))
    csv_file_path = save_results_to_csv(results)
    return results, csv_file_path
# Gradio interface: job description + resume uploads in, a results table
# and a downloadable CSV out.
with gr.Blocks() as demo:
    with gr.Row():
        job_desc_input = gr.Textbox(label="Job Description", lines=3)
        resume_input = gr.Files(label="Upload Resumes", file_count="multiple",
                                file_types=[".pdf", ".txt"])
    results_output = gr.Dataframe(headers=[
        "Resume Name", "Similarity Score (%)", "Eligibility", "Name",
        "Leadership Experience", "Email", "Contact",
    ])
    # BUG FIX: the download component must be created inside the layout.
    # The original built gr.File(..., value=save_results_to_csv) inline in
    # the outputs list, which never renders the component and sets its
    # value to a function object instead of a file path.
    csv_output = gr.File(label="Download CSV")
    check_button = gr.Button("Check Similarity")
    # check_similarity returns (rows, csv_path), matching the two outputs.
    check_button.click(
        check_similarity,
        inputs=[job_desc_input, resume_input],
        outputs=[results_output, csv_output],
    )

# Launch Gradio interface
demo.launch()