| import os | |
| import zipfile | |
| import tempfile | |
| import fitz | |
| import streamlit as st | |
| from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace | |
| os.environ["HF_TOKEN"]=os.getenv('HF_Token') | |
| os.environ["HUGGINGFACEHUB_API_KEY"]=os.getenv('HF_Token') | |
# Configure the text-generation endpoint once and wrap it for the chat API.
# The original passed a second, conflicting set of kwargs (including a
# DIFFERENT repo_id, Llama-3.2-3B) to ChatHuggingFace; ChatHuggingFace only
# needs the wrapped `llm` — duplicated generation fields are at best ignored
# and at worst rejected by its pydantic validation.
base_llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    provider="novita",
    temperature=0.7,
    max_new_tokens=150,
    task="conversational",
)
llm = ChatHuggingFace(llm=base_llm)
def extract_text(file_bytes):
    """Return the plain text of every page of a PDF, in page order.

    Parameters
    ----------
    file_bytes : bytes
        Raw PDF file content (e.g. from an uploaded file's ``.read()``).

    Returns
    -------
    str
        Concatenated text of all pages.
    """
    # Open the PDF directly from memory. The previous implementation wrote a
    # NamedTemporaryFile(delete=False) that was never unlinked (one leaked
    # file per call) and never closed the fitz document; the context manager
    # releases the document deterministically and no temp file is needed.
    with fitz.open(stream=file_bytes, filetype="pdf") as doc:
        return "".join(page.get_text() for page in doc)
def resume_prompt(text):
    """Build the LLM prompt that extracts structured fields from one resume."""
    fields = (
        "Full Name",
        "Education Background",
        "Total Years of Experience",
        "Technical & Soft Skills",
        "Key Projects and Outcomes",
    )
    numbered = "\n".join(f"{i}. {field}" for i, field in enumerate(fields, 1))
    return f"Extract the following from this resume:\n{numbered}\n\nResume Content:\n{text}"
def jd_prompt(text):
    """Build the LLM prompt that extracts structured fields from a job description."""
    fields = (
        "Job ID",
        "Company Name",
        "Designation",
        "Required Experience",
        "Key Skills",
        "Education",
        "Location",
    )
    numbered = "\n".join(f"{i}. {field}" for i, field in enumerate(fields, 1))
    return f"Extract this information from the job description:\n{numbered}\n\nJD Text:\n{text}"
def match_prompt(jd_text, all_resumes, top_n=3):
    """Build the ranking prompt that scores candidate summaries against a JD.

    ``top_n`` controls how many candidates the model is asked to list.
    """
    segments = [
        f"You are an expert recruitment AI. Based on the following job description and candidate summaries, rank the top {top_n} suitable profiles.\n",
        "Evaluate based on: Skills match, Experience, Education, and Project Relevance.\n",
        f"\nJob Description:\n{jd_text}\n\nCandidates:\n{all_resumes}\n\n",
        f"List the Top {top_n} candidates like this:\n",
        "1. Candidate Name - Matching Reason\n2. Candidate Name - Matching Reason\n...",
    ]
    return "".join(segments)
# Page chrome and input widgets.
# NOTE(review): the original labels contained mojibake ("π§", "βοΈ", "π―")
# from a UTF-8 -> ISO-8859-7 decode round trip; restored to plausible emoji —
# confirm against the intended originals.
st.set_page_config(page_title="AI Resume Screener", layout="centered")
st.title("🧠 Smart Resume Screener + JD Extractor")
st.markdown("Upload a ZIP of resumes and a Job Description to discover top-fit candidates, powered by AI.")
zip_file = st.file_uploader("📁 Upload ZIP of Resumes (PDFs Only)", type="zip")
jd_file = st.file_uploader("📄 Upload Job Description (PDF/TXT)", type=["pdf", "txt"])
jd_text_manual = st.text_area("✍️ Or Paste JD Text Directly Below")
top_n = st.slider("🎯 Number of Top Candidates", 1, 10, 3)
| if st.button("π Find Best Matches"): | |
| if not zip_file or not (jd_file or jd_text_manual.strip()): | |
| st.warning("Please upload both a ZIP file and a JD file/text.") | |
| st.stop() | |
| jd_text = "" | |
| if jd_file: | |
| jd_text = extract_text(jd_file.read()) if jd_file.name.endswith(".pdf") else jd_file.read().decode("utf-8") | |
| else: | |
| jd_text = jd_text_manual.strip() | |
| st.subheader("π Extracted JD Information") | |
| jd_response = llm.invoke(jd_prompt(jd_text)) | |
| st.markdown(jd_response.content) | |
| resume_summaries = "" | |
| with tempfile.TemporaryDirectory() as tmpdir: | |
| with zipfile.ZipFile(zip_file, "r") as archive: | |
| pdfs = [f for f in archive.namelist() if f.endswith(".pdf")] | |
| if not pdfs: | |
| st.error("No PDFs found in the ZIP file.") | |
| st.stop() | |
| st.success(f"β Found {len(pdfs)} resumes. Extracting details...") | |
| for file in pdfs: | |
| with archive.open(file) as pdf: | |
| text = extract_text(pdf.read()) | |
| summary = llm.invoke(resume_prompt(text)).content | |
| resume_summaries += f"\n\n[File: {file}]\n{summary}" | |
| with st.spinner("π Matching candidates to job description..."): | |
| final_prompt = match_prompt(jd_text, resume_summaries, top_n) | |
| match_result = llm.invoke(final_prompt) | |
| st.subheader("β Top Matched Candidates") | |
| st.markdown(match_result.content) | |