File size: 3,058 Bytes
6db8044
 
965a6a5
01a0155
 
3256e0d
 
 
6db8044
334e1f9
e9c1ffc
 
 
01a0155
 
df5a21c
965a6a5
01a0155
 
 
 
 
 
 
 
 
 
965a6a5
df5a21c
01a0155
 
 
 
 
965a6a5
01a0155
df5a21c
01a0155
 
0ac9eec
01a0155
df5a21c
 
0ac9eec
965a6a5
01a0155
965a6a5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01a0155
965a6a5
 
01a0155
9d9ab31
 
 
 
 
 
 
 
965a6a5
9d9ab31
 
 
 
 
 
01a0155
965a6a5
01a0155
965a6a5
 
 
 
01a0155
 
 
 
0ac9eec
01a0155
 
965a6a5
01a0155
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import streamlit as st
import os
import tempfile
import docx2txt
from pdfminer.high_level import extract_text
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# --- Environment & page setup -------------------------------------------
# Handle Hugging Face token.
# NOTE: langchain_huggingface / huggingface_hub read the token from
# HUGGINGFACEHUB_API_TOKEN — the previously used *_API_KEY name is not
# recognized, so the local-dev token was silently ignored.
hf_token = os.getenv("HF_TOKEN")  # For local dev
if hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

# UI Configuration
st.set_page_config(page_title="Resume Validator", layout="centered", page_icon="πŸ“„")

st.markdown("""
    <h1 style='text-align: center;'>πŸ“„ AI Resume Validator</h1>
    <p style='text-align: center;'>Upload your resume and receive instant feedback with suggestions for improvement</p>
    <br>
""", unsafe_allow_html=True)

# File upload: Streamlit restricts the picker to pdf/docx, but the name's
# extension is still checked later to choose the extractor.
uploaded_file = st.file_uploader("πŸ“€ Upload Resume (PDF or DOCX)", type=["pdf", "docx"])

resume_text = ""

if uploaded_file:
    # Persist the upload to disk so the pdf/docx extractors (which take a
    # filesystem path) can read it; keep the original extension for clarity.
    with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[-1]) as tmp_file:
        tmp_file.write(uploaded_file.read())
        temp_path = tmp_file.name

    # Extract text. Case-insensitive extension check so "RESUME.PDF" is
    # handled, and a finally-block so the temp file is removed even when
    # extraction raises (previously it leaked on failure).
    try:
        if uploaded_file.name.lower().endswith(".pdf"):
            resume_text = extract_text(temp_path)
        else:
            resume_text = docx2txt.process(temp_path)
    finally:
        os.remove(temp_path)

    st.markdown("### πŸ“ƒ Extracted Resume Text")
    st.text_area("Resume Text", resume_text, height=300)

    # Prompt template: asks for a structured multi-part review.
    template = """
    You are an expert HR recruiter.

    Here is the content of a resume:
    {resume_text}

    Evaluate the resume on the following criteria:
    1. Clarity and grammar
    2. Relevance of skills and keywords
    3. Structure (sections like Education, Experience, Projects, etc.)
    4. Overall impact

    Provide:
    - A rating out of 10
    - Key strengths
    - Weaknesses
    - Actionable suggestions to improve
    """

    prompt = PromptTemplate(input_variables=["resume_text"], template=template)

    # LLM Configuration.
    # max_new_tokens must be large enough for the full review (rating,
    # strengths, weaknesses, suggestions) — the previous value of 10
    # truncated the answer almost immediately. The token comes from the
    # single HF_TOKEN source read at the top of the file (the old
    # os.getenv("HF") lookup referenced an env var nothing sets).
    llm = HuggingFaceEndpoint(
        repo_id="mistralai/Mistral-7B-Instruct-v0.3",
        temperature=0.5,
        max_new_tokens=1024,
        task="text-generation",
        huggingfacehub_api_token=os.getenv("HF_TOKEN"),
    )

    model = ChatHuggingFace(llm=llm)

    chain = LLMChain(llm=model, prompt=prompt)

    if st.button("βœ… Validate Resume"):
        with st.spinner("Analyzing your resume..."):
            try:
                result = chain.run(resume_text=resume_text)
                st.success("βœ… Resume Analysis Completed")
                st.markdown("### πŸ“Š Feedback")
                st.markdown(result)
            except Exception as e:
                # Surface the failure in the UI rather than crashing the app.
                st.error(f"⚠️ An error occurred: {e}")
else:
    st.markdown("<center><i>Please upload your resume to start validation.</i></center>", unsafe_allow_html=True)