# Resume Analyzer + Job Match AI — Streamlit app (Hugging Face Spaces)
import streamlit as st
import os
import faiss
import numpy as np
import PyPDF2
from sentence_transformers import SentenceTransformer
from groq import Groq
# Set Groq API key from environment variable
# NOTE(review): os.getenv returns None when unset — the Groq client is still
# constructed and only fails at request time; confirm the Space secret exists.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
groq_client = Groq(api_key=GROQ_API_KEY)
# ✅ Use a compatible SentenceTransformer model for Hugging Face Spaces
# Force CPU so the app runs on GPU-less (free-tier) Spaces hardware.
embedder = SentenceTransformer("paraphrase-MiniLM-L6-v2", device='cpu')
# Example job descriptions
# Toy in-memory "job board": title -> comma-separated required skills.
job_data = {
"Data Scientist": "Python, Machine Learning, Data Analysis, Pandas, Scikit-learn, SQL",
"Software Engineer": "C++, Java, Git, Object Oriented Programming, System Design",
"Web Developer": "HTML, CSS, JavaScript, React, Responsive Design, Web APIs"
}
# One searchable text chunk per job ("Title: skills"); index order must match
# the FAISS index so search results map back to these strings by position.
job_chunks = [f"{k}: {v}" for k, v in job_data.items()]
job_embeddings = embedder.encode(job_chunks)
# Exact L2-distance index over the job embeddings (3 vectors — brute force is fine).
faiss_index = faiss.IndexFlatL2(job_embeddings.shape[1])
faiss_index.add(np.array(job_embeddings).astype("float32"))
# ----------- Functions -----------
def extract_text_from_pdf(uploaded_file):
    """Extract and concatenate the text of every page in an uploaded PDF.

    Args:
        uploaded_file: File-like object (e.g. a Streamlit UploadedFile)
            readable by PyPDF2.

    Returns:
        str: Page texts joined by single spaces. Pages yielding no
        extractable text (empty string or None) are skipped.
    """
    reader = PyPDF2.PdfReader(uploaded_file)
    # FIX: call extract_text() once per page — the original called it twice
    # (once to filter, once to join), doubling the parsing work.
    return " ".join(
        text for page in reader.pages if (text := page.extract_text())
    )
def find_best_job_match(resume_text):
    """Return the job chunk whose embedding is nearest (L2) to the resume's.

    Args:
        resume_text: Plain-text resume contents.

    Returns:
        str: The single best-matching "Title: skills" string from job_chunks.
    """
    resume_vec = embedder.encode([resume_text])
    resume_vec = np.asarray(resume_vec, dtype="float32")
    # Top-1 nearest-neighbor lookup; distances are not needed.
    _distances, nearest = faiss_index.search(resume_vec, 1)
    best_idx = nearest[0][0]
    return job_chunks[best_idx]
def query_llm(context, question):
    """Ask a Groq-hosted LLM how well a resume fits the matched job.

    Args:
        context: The matched job-description chunk ("Title: skills").
        question: The raw resume text.

    Returns:
        str: The model's free-form fit analysis.
    """
    prompt = f"""You are an expert job coach. Based on the resume below and the job context:
Context: {context}
Resume: {question}
Give a simple analysis of how well this resume fits and what's missing."""
    response = groq_client.chat.completions.create(
        # BUG FIX: "meta-llama/llama-guard-4-12b" is Llama Guard, a content-
        # safety *classifier* that replies with safe/unsafe verdicts — it
        # cannot produce a resume analysis. Use a general-purpose instruct
        # model instead.
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content
# ----------- Streamlit UI -----------
st.set_page_config(page_title="🧠 Resume Analyzer + Job Match AI")
st.title("📄 Resume Analyzer + Job Match AI")

resume_pdf = st.file_uploader("Upload Your Resume (PDF)", type=["pdf"])
if resume_pdf:
    # Pull the raw text out of the PDF, then match it against the job index.
    extracted_resume = extract_text_from_pdf(resume_pdf)
    st.success("Resume Uploaded Successfully!")
    st.subheader("💼 Analyzing for Job Match...")
    matched_job = find_best_job_match(extracted_resume)
    st.write(f"🔍 Best Job Match Found: **{matched_job}**")
    # LLM call is slow — show a spinner while waiting on the API.
    with st.spinner("Analyzing Resume Fit..."):
        feedback = query_llm(matched_job, extracted_resume)
    st.subheader("🧾 AI Feedback:")
    st.write(feedback)
    st.info("This app is for guidance only. Tailor your resume per job posting!")