# Resume_Chatbot / app.py
# (Hugging Face Space page header: Sadashiv — "Adjust the chatbox height", commit b6dda82)
from dotenv import load_dotenv
from openai import OpenAI
import json
import os
import requests
import gradio as gr
import fitz # PyMuPDF
# Load environment variables from a local .env file; override=True lets the
# .env values take precedence over anything already set in the process env.
load_dotenv(override=True)

# Pushover notification credentials — both optional; push() degrades to a
# console message when either is missing.
pushover_user = os.getenv("PUSHOVER_USER")
pushover_token = os.getenv("PUSHOVER_TOKEN")
# Pushover REST endpoint used by push().
pushover_url = "https://api.pushover.net/1/messages.json"
# function to send notifications
def push(message: str) -> None:
    """Send *message* as a Pushover notification (best-effort).

    Skips sending (with a console note) when credentials are missing, and
    never raises on network failure — a lost notification must not break chat.
    """
    if pushover_user and pushover_token:
        payload = {"user": pushover_user, "token": pushover_token, "message": message}
        try:
            requests.post(pushover_url, data=payload, timeout=5)
        except requests.exceptions.RequestException as e:
            # Bug fix: requests has no `RequestError`; `RequestException` is the
            # base class of all requests failures (timeouts, connection errors,
            # HTTP errors). The original would raise AttributeError on failure.
            print(f"Pushover notification failed: {e}")
    else:
        print("Pushover credentials not found. Skipping notification")
# Function to record the user details
def record_user_details(email: str, name: str = 'Name not provided', notes: str = 'Notes not provided'):
    """Record that a visitor wants to stay in touch, via a push notification.

    Returns a small ack dict that is fed back to the LLM as the tool result.
    """
    notification = f"Recording interest from {name} with email {email} and notes {notes}"
    push(notification)
    return {"recorded": "ok"}
# Function to record unknown questions
def record_unknown_question(question):
    """Log a question the assistant could not answer, via a push notification.

    Returns a small ack dict that is fed back to the LLM as the tool result.
    """
    notification = f"Recording {question} asked that I couldn't answer"
    push(notification)
    return {"recorded": "ok"}
# Tool schema (OpenAI function-calling format): record a user's contact details.
record_user_details_json = {
    "name": "record_user_details",
    # Fixed typo in the LLM-facing instruction: "being touch" -> "being in touch".
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The email address of this user"},
            "name": {"type": "string", "description": "The user's name, if they provided it"},
            "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording to give context"},
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
# Tool schema (OpenAI function-calling format): log a question the model couldn't answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            # Fixed grammar in the LLM-facing description: "couldn't answered" -> "couldn't answer".
            "question": {"type": "string", "description": "The question that you couldn't answer"}
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
# List of tools for the LLM
# OpenAI tool-call format: each entry wraps one JSON-schema function definition.
# The names here must match the Python functions resolved in handle_tool_call.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json}
]
class ResumeChatbot:
    """Chats about an uploaded resume using OpenAI chat completions with tool calls."""

    def __init__(self):
        # Reads OPENAI_API_KEY from the environment (loaded via .env above).
        self.open_ai = OpenAI()

    def extract_text_from_pdf(self, pdf_path):
        """Extract and concatenate the text of every page of the PDF at *pdf_path*.

        Returns the full text as one string, or None if the file can't be read.
        """
        try:
            # Bug fix: the original never closed the document; the context
            # manager releases the file handle even if extraction raises.
            with fitz.open(pdf_path) as doc:
                return "".join(page.get_text() for page in doc)
        except Exception as e:
            print(f"Error reading PDF: {e}")
            return None

    def handle_tool_call(self, tool_calls):
        """Run each requested tool and return 'tool'-role result messages.

        Tool names are resolved against module globals (record_user_details,
        record_unknown_question); an unknown name yields an empty result.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id
            })
        return results

    def get_system_prompt(self, resume_text):
        """Build the system prompt that grounds the assistant in *resume_text*."""
        system_prompt = f"""
        You are acting as an expert assistant representing the individual whose resume is provided below.
        Your task is to answer questions strictly based on the information contained in the resume.
        Do not fabricate or assume any details that are not explicitly mentioned in the resume.
        If asked about improvements or suggestions, respond with clear, concise, and focused points only.
        Keep your answers compact and to the point, and expand only if the user explicitly asks for more details.
        If a user asks a question you cannot answer from the resume, use the record_unknown_question tool to log the unanswered query.
        If the user expresses interest in following up or staying in touch, politely ask for their name and email,
        then record it using the record_user_details tool.
        Resume Content:
        {resume_text}
        """
        return system_prompt

    def chat(self, message: str, history: list, resume_text: str):
        """Run one chat turn and return the assistant's final reply text.

        Parameters:
            message: the new user message.
            history: Gradio pair-format history [[user, assistant], ...];
                     a None entry marks a missing side of a turn.
            resume_text: extracted resume text used for the system prompt.

        Loops until the model stops requesting tool calls, executing tools and
        feeding their results back into the conversation between iterations.
        """
        system_prompt = self.get_system_prompt(resume_text)
        # Convert Gradio pair-format history to OpenAI messages format.
        formatted_history = []
        for user_msg, bot_msg in history:
            if user_msg is not None:  # None = bot-only opening turn
                formatted_history.append({"role": "user", "content": user_msg})
            if bot_msg is not None:
                formatted_history.append({"role": "assistant", "content": bot_msg})
        # System prompt + prior turns + the new user message.
        messages = [{"role": "system", "content": system_prompt}] + formatted_history + [{"role": "user", "content": message}]
        done = False
        while not done:
            response = self.open_ai.chat.completions.create(
                model="gpt-4o-mini",
                messages=messages,
                tools=tools  # lets the model invoke the recording tools
            )
            finish_reason = response.choices[0].finish_reason
            if finish_reason == "tool_calls":
                message_response = response.choices[0].message
                tool_calls = message_response.tool_calls
                results = self.handle_tool_call(tool_calls)
                messages.append(message_response)  # assistant turn containing the tool calls
                messages.extend(results)           # tool outputs for the next completion
            else:
                done = True
        return response.choices[0].message.content
# Single shared ResumeChatbot instance used by the Gradio callbacks below.
chatbot_instance = ResumeChatbot()
def upload_and_process_resume(file_obj):
    """Gradio upload handler: extract text from the uploaded PDF.

    Returns a (resume_text, chat_history, textbox_value) triple matching the
    event's outputs [resume_text_state, chatbot, msg_box].
    """
    if file_obj is None:
        return None, [], "Please upload a PDF resume to begin."
    # Bug fix: with gr.File(type="filepath") Gradio passes a plain str path,
    # which has no .name attribute — the original crashed on every upload.
    # Accept both a str path and a file-like object exposing .name.
    pdf_path = file_obj if isinstance(file_obj, str) else file_obj.name
    resume_text = chatbot_instance.extract_text_from_pdf(pdf_path)
    if resume_text is None or not resume_text.strip():
        return None, [], "Could not read text from the uploaded PDF. Please try another file."
    # Seed the chat with a bot-only welcome turn (user side is None in
    # Gradio's pair-format history).
    initial_message = "Thank you for uploading the resume. How can I help you today?"
    chat_history = [[None, initial_message]]
    return resume_text, chat_history, ""  # also clears the question textbox
def respond(message: str, chat_history: list, resume_state: str):
    """Gradio chat handler: append the bot's reply for *message* to the history.

    Returns ("", updated_history) so the question textbox is cleared each turn.
    """
    # Guard clause: a conversation only makes sense once a resume is loaded.
    if not resume_state:
        chat_history.append([message, "Please upload a resume before starting the conversation."])
        return "", chat_history
    # Delegate to the model; history stays in Gradio's pair format throughout.
    reply = chatbot_instance.chat(message, chat_history, resume_state)
    chat_history.append([message, reply])
    return "", chat_history
# --- Gradio Interface ---
if __name__ == "__main__":
    with gr.Blocks(theme=gr.themes.Soft(), title="Resume Chatbot") as demo:
        # Per-session store for the extracted resume text.
        resume_text_state = gr.State(None)

        gr.Markdown("# Chat with a Resume")
        gr.Markdown("Upload a PDF resume below, then ask questions about it.")

        with gr.Row():
            with gr.Column(scale=1):
                file_uploader = gr.File(
                    label="Upload PDF Resume",
                    file_types=[".pdf"],
                    type="filepath",  # handler receives the temp-file path as a str
                )
            with gr.Column(scale=2):
                chatbot = gr.Chatbot(label="Conversation", height=350)
                msg_box = gr.Textbox(label="Your Question", placeholder="e.g., What are the key skills mentioned?")
                submit_btn = gr.Button("Send")

        # A fresh upload (re)initializes the session: stores the resume text,
        # resets the conversation, and clears the question box.
        file_uploader.upload(
            fn=upload_and_process_resume,
            inputs=[file_uploader],
            outputs=[resume_text_state, chatbot, msg_box],
        )

        # Pressing Enter in the textbox and clicking Send are equivalent.
        for bind_event in (msg_box.submit, submit_btn.click):
            bind_event(
                fn=respond,
                inputs=[msg_box, chatbot, resume_text_state],
                outputs=[msg_box, chatbot],
            )

    demo.launch()