# resume-analyzer / app.py
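"""Streamlit app that reviews an uploaded resume, optionally against a job description,
using a LiteLLM-backed LangChain conversation chain."""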
import os
import sys
# Add source directory to path so sibling imports work
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "Code+Folder", "src"))
import streamlit as st
try:
from streamlit_feedback import streamlit_feedback
FEEDBACK_AVAILABLE = True
except ImportError:
FEEDBACK_AVAILABLE = False
from langchain_classic.chains import ConversationChain
from langchain_classic.memory import ConversationBufferWindowMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_community.chat_models import ChatLiteLLM
from constants import (
    PROVIDERS, TEMPLATE_CONTENT, comparison_prompt, resume_analysis_prompt,
    job_description_analysis_prompt, gap_analysis_prompt, actionable_steps_prompt,
    experience_enhancement_prompt, additional_qualifications_prompt, resume_tailoring_prompt,
    relevant_skills_highlight_prompt, resume_formatting_prompt, resume_length_prompt,
)
from directory_reader import DirectoryReader
st.set_page_config(page_title="Resume Reviewer")
# Initialize llm as None at the top level
llm = None
resume_chain = None
# Initialize variables
resume_content = None
job_description_content = None
# Sidebar
with st.sidebar:
st.title('Resume Reviewer')
st.write("Upload your resume for my recommendations. Job description is optional.")
# Provider & Model selection
st.write("---")
st.write("### LLM Provider")
provider_names = list(PROVIDERS.keys())
selected_provider = st.selectbox("Provider", provider_names)
provider_info = PROVIDERS[selected_provider]
selected_model = st.selectbox("Model", provider_info["models"])
st.write(f"Get an API key at [{selected_provider}]({provider_info['url']})")
api_key = st.text_input(
f"{selected_provider} API Key", type="password",
help="Your API key will not be stored",
)
if api_key:
os.environ[provider_info["env_var"]] = api_key
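        # ChatLiteLLM routes the call through LiteLLM, which picks up the provider key from the env var set above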
llm = ChatLiteLLM(model=selected_model, temperature=0.0)
else:
st.info(f"Please enter your {selected_provider} API key to start")
llm = None
# Resume upload (file only)
st.write("---")
st.write("### Resume")
st.write("Note: File size should be less than 5MB")
resume_file = st.file_uploader("Upload your resume (PDF)", type=["pdf"], accept_multiple_files=False)
# JD input (file or text, optional)
st.write("---")
st.write("### Job Description (optional)")
jd_file = st.file_uploader("Upload a JD (txt file)", type=["txt"], accept_multiple_files=False)
jd_text = st.text_area("Or paste the job description here:", height=150)
# Process resume
if resume_file is not None and api_key:
try:
with st.spinner("Processing resume file..."):
directory_reader = DirectoryReader("", "")
resume_content = directory_reader.extract_text_from_pdf(resume_file)
st.sidebar.success("Resume processed successfully!")
except Exception as e:
st.sidebar.error(f"Error processing resume file: {str(e)}")
resume_content = None
# Process JD - prefer file upload, fall back to text input
if jd_file is not None:
try:
from io import StringIO
stringio = StringIO(jd_file.getvalue().decode('utf-8'))
job_description_content = stringio.read()
st.sidebar.success("JD processed successfully!")
except Exception as e:
st.sidebar.error(f"Error processing JD file: {str(e)}")
elif jd_text:
job_description_content = jd_text
# Build system prompt based on what's provided
if resume_content and job_description_content:
SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
"<RESUME STARTS HERE> {}. <RESUME ENDS HERE> with the job description: " \
"<JOB DESCRIPTION STARTS HERE> {}.<JOB DESCRIPTION ENDS HERE>\n\n" \
"Be crisp and clear in response. DO NOT provide the resume and job description in the response.\n\n".format(
resume_content, job_description_content)
elif resume_content:
SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT + \
"<RESUME STARTS HERE> {}. <RESUME ENDS HERE>\n\n" \
"No job description was provided. Focus on general resume feedback, strengths, and areas for improvement. " \
"Be crisp and clear in response. DO NOT provide the resume in the response.\n\n".format(resume_content)
else:
SYSTEM_PROMPT = "\n\n" + TEMPLATE_CONTENT
# Store LLM generated responses
if "messages" not in st.session_state.keys():
st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
# Display or clear chat messages
for message in st.session_state.messages:
if message["role"] != "feedback":
with st.chat_message(message["role"]):
st.write(message["content"])
def clear_chat_history():
    global resume_chain
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
    # Also reset the conversation memory so cleared turns do not leak into future responses
    memory.clear()
    if llm is not None:
        resume_chain = ConversationChain(
            llm=llm,
            prompt=prompt_template,
            memory=memory,
            verbose=False,
        )
def generate_report():
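    # Assemble a multi-section report by running each analysis prompt through the conversation chain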
user_message = {"role": "user", "content": "Generate a Report!"}
st.session_state.messages.append(user_message)
if resume_content is None:
st.error("Please upload a resume first!")
return
with st.chat_message("assistant"):
with st.spinner("Just a moment..."):
resume_analysis = generate_response(resume_analysis_prompt.format(resume_content))
resume_formatting_analysis = generate_response(
resume_formatting_prompt.format(resume_content, "N/A"))
report = f"**Resume Analysis:**\n{resume_analysis}\n\n" \
f"**Resume Formatting:**\n{resume_formatting_analysis}"
if job_description_content is not None:
comparison_analysis = generate_response(
comparison_prompt.format(resume_content, job_description_content))
job_description_analysis = generate_response(
job_description_analysis_prompt.format(job_description_content))
gap_analysis = generate_response(
gap_analysis_prompt.format(resume_content, job_description_content))
actionable_steps_analysis = generate_response(
actionable_steps_prompt.format(resume_content, job_description_content))
experience_enhancement_analysis = generate_response(
experience_enhancement_prompt.format(resume_content, job_description_content))
additional_qualifications_analysis = generate_response(
additional_qualifications_prompt.format(resume_content, job_description_content))
resume_tailoring_analysis = generate_response(
resume_tailoring_prompt.format(resume_content, job_description_content))
relevant_skills_highlight_analysis = generate_response(
relevant_skills_highlight_prompt.format(resume_content, job_description_content))
resume_length_analysis = generate_response(
resume_length_prompt.format(resume_content, job_description_content))
report += f"\n\n**Comparison Analysis:**\n{comparison_analysis}\n\n" \
f"**Job Description Analysis:**\n{job_description_analysis}\n\n" \
f"**Gap Analysis:**\n{gap_analysis}\n\n" \
f"**Actionable Steps:**\n{actionable_steps_analysis}\n\n" \
f"**Experience Enhancement:**\n{experience_enhancement_analysis}\n\n" \
f"**Additional Qualifications:**\n{additional_qualifications_analysis}\n\n" \
f"**Resume Tailoring:**\n{resume_tailoring_analysis}\n\n" \
f"**Relevant Skills Highlight:**\n{relevant_skills_highlight_analysis}\n\n" \
f"**Resume Length:**\n{resume_length_analysis}"
report_message = {"role": "assistant", "content": report}
st.session_state.messages.append(report_message)
# Setup the system message and prompt template
system_message = SystemMessage(content=TEMPLATE_CONTENT)
human_message = HumanMessagePromptTemplate.from_template("{history} User:{input} Assistant:")
prompt_template = ChatPromptTemplate(messages=[system_message, human_message])
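# Window memory keeps only the last k=2 exchanges to bound the prompt size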
memory = ConversationBufferWindowMemory(k=2)
# Initialize the chain if llm is available
if llm is not None:
resume_chain = ConversationChain(
llm=llm,
prompt=prompt_template,
memory=memory,
verbose=False
)
def generate_response(prompt_input):
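    # resume_chain stays None until a valid API key has been entered in the sidebar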
if resume_chain is None:
return "Please enter your API key to use this application"
output = resume_chain.predict(input=prompt_input)
return output
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
st.sidebar.button('Generate Report', on_click=generate_report)
def get_feedback():
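    # Store the raw feedback widget payload as a pseudo-message so the next rerun can react to it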
st.session_state.messages.append({"role": "feedback", "content": st.session_state.fbk})
# Keep the latest user prompt in session state so callbacks and reruns can reuse it
if "current_prompt" not in st.session_state:
st.session_state.current_prompt = ""
# When user enters a prompt
if prompt := st.chat_input():
st.session_state.current_prompt = prompt
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
def get_llm_response():
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = generate_response(st.session_state.current_prompt + SYSTEM_PROMPT)
placeholder = st.empty()
placeholder.markdown(response)
full_response = response
message = {"role": "assistant", "content": full_response}
st.session_state.messages.append(message)
# Only show feedback form if the feature is available
if FEEDBACK_AVAILABLE:
with st.form("form"):
streamlit_feedback(feedback_type="thumbs", optional_text_label="[Optional] Please provide an explanation", key="fbk")
st.form_submit_button('Save feedback', on_click=get_feedback)
# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] not in ["assistant", "feedback"]:
get_llm_response()
if st.session_state.messages[-1]["role"] in ["feedback"]:
    try:
        feedback_response = st.session_state.messages[-1]["content"]
        # streamlit_feedback reports thumbs scores as emoji; keep the legacy keys as a fallback
        score_mappings = {
            "thumbs": {"👍": 1, "thumbs_up": 1, "👎": 0, "thumbs_down": 0},
        }
        score = score_mappings[feedback_response["type"]][feedback_response["score"]]
        if score == 0:
            feedback = feedback_response["text"]
            # Feed the reviewer's comment back into the next prompt so the previous answer is regenerated
            st.session_state.current_prompt = (
                "Please respond according to feedback '{0}' on the previous response on \n".format(feedback)
                + st.session_state.messages[-3]["content"]
            )
            get_llm_response()
    except (KeyError, IndexError, TypeError):
        pass