Sadashiv committed on
Commit
19010a2
·
verified ·
1 Parent(s): 6fa8a62

Added function to send push notification for unknown question

Browse files
Files changed (1) hide show
  1. app.py +242 -237
app.py CHANGED
@@ -1,238 +1,243 @@
1
- from dotenv import load_dotenv
2
- from openai import OpenAI
3
- import json
4
- import os
5
- import requests
6
- import gradio as gr
7
- import fitz # PyMuPDF
8
-
9
- # load the environment variables
10
- load_dotenv(override=True)
11
-
12
- # Setting up pushover for notification
13
- pushover_user = os.getenv("PUSHOVER_USER")
14
- pushover_token = os.getenv("PUSHOVER_TOKEN")
15
- pushover_url = "https://api.pushover.net/1/messages.json"
16
-
17
- # function to send notifications
18
- def push(message: str):
19
- if pushover_user and pushover_token:
20
- payload = {"user": pushover_user, "token": pushover_token, "message": message}
21
- try:
22
- requests.post(pushover_url, data=payload, timeout=5)
23
- except requests.exceptions.RequestError as e:
24
- print(f"Pushover notification failed: {e}")
25
- else:
26
- print("Pushover credentials not found. Skipping notification")
27
-
28
- # Function to record the user details
29
- def record_user_details(email: str, name: str='Name not provided', notes: str='Notes not provided'):
30
- push(f"Recording interest from {name} with email {email} and notes {notes}")
31
- return {"recorded": "ok"}
32
-
33
- # Tool to record user details
34
- record_user_details_json = {
35
- "name": "record_user_details",
36
- "description": "Use this tool to record that a user is interested in being touch and provided an email address",
37
- "parameters": {
38
- "type": "object",
39
- "properties": {
40
- "email": {"type": "string", "description": "The email address of this user"},
41
- "name": {"type": "string", "description": "The user's name, if they provided it"},
42
- "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording to give context"}
43
- },
44
- "required": ["email"],
45
- "additionalProperties": False
46
- }
47
- }
48
-
49
- # Tool to log unanswered questions
50
- record_unknown_question_json = {
51
- "name": "record_unknown_question",
52
- "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
53
- "parameters": {
54
- "type": "object",
55
- "properties": {
56
- "question": {"type": "string", "description": "The question that you couldn't answered"}
57
- },
58
- "required": ["question"],
59
- "additionalProperties": False
60
- }
61
- }
62
-
63
- # List of tools for the LLM
64
- tools = [
65
- {"type": "function", "function": record_user_details_json},
66
- {"type": "function", "function": record_unknown_question_json}
67
- ]
68
-
69
- class ResumeChatbot:
70
- def __init__(self):
71
- self.open_ai = OpenAI()
72
-
73
- def extract_text_from_pdf(self, pdf_path):
74
- """Extracts text from a given PDF file path."""
75
- try:
76
- doc = fitz.open(pdf_path)
77
- full_text = ""
78
- for page in doc:
79
- full_text += page.get_text()
80
- return full_text
81
- except Exception as e:
82
- print(f"Error reading PDF: {e}")
83
- return None
84
-
85
- def handle_tool_call(self, tool_calls):
86
- results = []
87
- for tool_call in tool_calls:
88
- tool_name = tool_call.function.name
89
- arguments = json.loads(tool_call.function.arguments)
90
- tool = globals().get(tool_name)
91
- result = tool(**arguments) if tool else {}
92
- results.append({
93
- "role": "tool",
94
- "content": json.dumps(result),
95
- "tool_call_id": tool_call.id
96
- })
97
- return results
98
-
99
- def get_system_prompt(self, resume_text):
100
- system_prompt = f"""
101
- You are acting as an expert assistant representing the individual whose resume is provided below.
102
- Your task is to answer questions strictly based on the information contained in the resume.
103
- Do not fabricate or assume any details that are not explicitly mentioned in the resume.
104
-
105
- If asked about improvements or suggestions, respond with clear, concise, and focused points only.
106
- Keep your answers compact and to the point, and expand only if the user explicitly asks for more details.
107
-
108
- If a user asks a question you cannot answer from the resume, use the record_unknown_question tool to log the unanswered query.
109
-
110
- If the user expresses interest in following up or staying in touch, politely ask for their name and email,
111
- then record it using the record_user_details tool.
112
-
113
- Resume Content:
114
- {resume_text}
115
- """
116
- return system_prompt
117
-
118
- def chat(self, message: str, history: list, resume_text: str):
119
- system_prompt = self.get_system_prompt(resume_text)
120
-
121
- # Convert Gradio chat_history to OpenAI messages format
122
- formatted_history = []
123
- for user_msg, bot_msg in history:
124
- if user_msg is not None: # User messages are not None when they've actually typed something
125
- formatted_history.append({"role": "user", "content": user_msg})
126
- if bot_msg is not None: # Bot messages are not None when they've responded
127
- formatted_history.append({"role": "assistant", "content": bot_msg})
128
-
129
- # Construct the full message history: system prompt, formatted chat history, and new user message
130
- messages = [{"role": "system", "content": system_prompt}] + formatted_history + [{"role": "user", "content": message}]
131
-
132
- done = False # Flag to track when the chat loop should stop
133
-
134
- while not done:
135
- # Call the OpenAI chat model with messages and available tools
136
- response = self.open_ai.chat.completions.create(
137
- model="gpt-4o-mini", # Model to use
138
- messages=messages, # Full conversation history
139
- tools=tools # Pass in tools so the LLM can invoke them
140
- )
141
-
142
- # Check how the model decided to end its generation
143
- finish_reason = response.choices[0].finish_reason
144
-
145
- # If the model wants to call a tool, handle the tool calls
146
- if finish_reason == "tool_calls":
147
- message_response = response.choices[0].message # Extract the message containing the tool call
148
- tool_calls = message_response.tool_calls # Get the list of tool calls
149
- results = self.handle_tool_call(tool_calls) # Run the tools and get their results
150
- messages.append(message_response) # Add the original tool call message to history
151
- messages.extend(results) # Add tool results to message history for LLM to continue
152
- else:
153
- # If no tool call is needed, we're done and can return the final response
154
- done = True
155
-
156
- # Return the final message content from the model as the assistant's reply
157
- return response.choices[0].message.content
158
-
159
- # Create a single instance of the Me class
160
- chatbot_instance = ResumeChatbot()
161
-
162
- def upload_and_process_resume(file_obj):
163
- """
164
- Gradio function to handle file uploads.
165
- It extracts text from the uploaded PDF and stores it.
166
- """
167
- if file_obj is None:
168
- return None, [], "Please upload a PDF resume to begin."
169
-
170
- # The file_obj has a .name attribute which is the temporary path to the uploaded file
171
- resume_text = chatbot_instance.extract_text_from_pdf(file_obj.name)
172
-
173
- if resume_text is None or not resume_text.strip():
174
- return None, [], "Could not read text from the uploaded PDF. Please try another file."
175
-
176
- # Clear chat history and provide a welcome message
177
- # The welcome message is structured to fit Gradio's chat history format
178
- initial_message = "Thank you for uploading the resume. How can I help you today?"
179
- chat_history = [[None, initial_message]] # User message is None for the initial bot message
180
- return resume_text, chat_history, "" # returns resume_text to state, updated chatbot, and clears textbox
181
-
182
- def respond(message: str, chat_history: list, resume_state: str):
183
- """
184
- Gradio function to handle the chat interaction.
185
- It gets the resume text from the session's state.
186
- """
187
- if not resume_state:
188
- # If no resume has been uploaded yet
189
- chat_history.append([message, "Please upload a resume before starting the conversation."])
190
- return "", chat_history
191
-
192
- # Get the bot's response
193
- # The chat_history passed to chatbot_instance.chat is still in Gradio's format
194
- bot_message = chatbot_instance.chat(message, chat_history, resume_state)
195
- chat_history.append([message, bot_message]) # Append the new user message and bot response to Gradio's history
196
- return "", chat_history # Clears the textbox and returns the updated history
197
-
198
- # --- Gradio Interface ---
199
- if __name__ == "__main__":
200
- with gr.Blocks(theme=gr.themes.Soft(), title="Resume Chatbot") as demo:
201
- # State to hold the extracted resume text for the user's session
202
- resume_text_state = gr.State(None)
203
-
204
- gr.Markdown("# Chat with a Resume")
205
- gr.Markdown("Upload a PDF resume below, then ask questions about it.")
206
-
207
- with gr.Row():
208
- with gr.Column(scale=1):
209
- file_uploader = gr.File(
210
- label="Upload PDF Resume",
211
- file_types=[".pdf"],
212
- type="filepath" # Passes the temporary filepath to the function
213
- )
214
- with gr.Column(scale=2):
215
- chatbot = gr.Chatbot(label="Conversation", height=500)
216
- msg_box = gr.Textbox(label="Your Question", placeholder="e.g., What are the key skills mentioned?")
217
- submit_btn = gr.Button("Send")
218
-
219
- # Event handler for the file upload
220
- file_uploader.upload(
221
- fn=upload_and_process_resume,
222
- inputs=[file_uploader],
223
- outputs=[resume_text_state, chatbot, msg_box]
224
- )
225
-
226
- # Event handlers for chat submission
227
- msg_box.submit(
228
- fn=respond,
229
- inputs=[msg_box, chatbot, resume_text_state],
230
- outputs=[msg_box, chatbot]
231
- )
232
- submit_btn.click(
233
- fn=respond,
234
- inputs=[msg_box, chatbot, resume_text_state],
235
- outputs=[msg_box, chatbot]
236
- )
237
-
 
 
 
 
 
238
  demo.launch()
 
1
+ from dotenv import load_dotenv
2
+ from openai import OpenAI
3
+ import json
4
+ import os
5
+ import requests
6
+ import gradio as gr
7
+ import fitz # PyMuPDF
8
+
9
# Load environment variables from a local .env file, overriding any already set.
load_dotenv(override=True)
11
+
12
# --- Pushover notification setup ---
# Credentials come from the environment; notifications are best-effort and are
# skipped (with a console message) when credentials are not configured.
pushover_user = os.getenv("PUSHOVER_USER")
pushover_token = os.getenv("PUSHOVER_TOKEN")
pushover_url = "https://api.pushover.net/1/messages.json"


# function to send notifications
def push(message: str) -> None:
    """Send ``message`` as a Pushover notification; no-op without credentials.

    Network failures are logged and swallowed so a notification problem never
    breaks the chat flow.
    """
    if pushover_user and pushover_token:
        payload = {"user": pushover_user, "token": pushover_token, "message": message}
        try:
            requests.post(pushover_url, data=payload, timeout=5)
        # Bug fix: ``requests.exceptions.RequestError`` does not exist; the
        # base class for all requests failures is ``RequestException``. The
        # old name raised AttributeError whenever the POST actually failed.
        except requests.exceptions.RequestException as e:
            print(f"Pushover notification failed: {e}")
    else:
        print("Pushover credentials not found. Skipping notification")


# Function to record the user details (tool target for the LLM)
def record_user_details(email: str, name: str = 'Name not provided', notes: str = 'Notes not provided'):
    """Log a user's contact details via push notification; returns an ack dict."""
    push(f"Recording interest from {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}


# Function to record unknown questions (tool target for the LLM)
def record_unknown_question(question):
    """Log a question the assistant could not answer; returns an ack dict."""
    push(f"Recording {question} asked that I couldn't answer")
    return {"recorded": "ok"}
37
+
38
# Tool schema (OpenAI function-calling format) for record_user_details.
record_user_details_json = {
    "name": "record_user_details",
    # Fixed grammar in the description the LLM reads ("being touch" -> "being in touch").
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The email address of this user"},
            "name": {"type": "string", "description": "The user's name, if they provided it"},
            "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording to give context"}
        },
        # Only the email is mandatory; name and notes are optional context.
        "required": ["email"],
        "additionalProperties": False
    }
}
53
+
54
# Tool schema (OpenAI function-calling format) for record_unknown_question.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            # Fixed grammar ("couldn't answered" -> "couldn't answer") in the
            # description the LLM reads.
            "question": {"type": "string", "description": "The question that you couldn't answer"}
        },
        "required": ["question"],
        "additionalProperties": False
    }
}
67
+
68
# Tool schemas exposed to the LLM for function calling.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
73
+
74
class ResumeChatbot:
    """Answers questions about an uploaded resume via the OpenAI chat API.

    Tool calls requested by the model are dispatched by name to module-level
    functions (record_user_details, record_unknown_question).
    """

    def __init__(self):
        # OpenAI client; reads OPENAI_API_KEY from the environment.
        self.open_ai = OpenAI()

    def extract_text_from_pdf(self, pdf_path):
        """Extract and return all text from the PDF at ``pdf_path``.

        Returns the concatenated text of every page, or None if the file
        cannot be opened or read.
        """
        try:
            # Bug fix: the document handle was never closed, leaking the file
            # on every upload. The context manager closes it even if text
            # extraction raises part-way through.
            with fitz.open(pdf_path) as doc:
                full_text = ""
                for page in doc:
                    full_text += page.get_text()
                return full_text
        except Exception as e:
            print(f"Error reading PDF: {e}")
            return None

    def handle_tool_call(self, tool_calls):
        """Execute each tool the model requested and build tool messages.

        Returns a list of OpenAI-format ``role="tool"`` messages, one per
        call, each linked back to its request via ``tool_call_id``. An
        unknown tool name yields an empty result instead of raising.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            tool = globals().get(tool_name)  # resolve the tool function by name
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id
            })
        return results

    def get_system_prompt(self, resume_text):
        """Build the system prompt with ``resume_text`` embedded at the end."""
        system_prompt = f"""
        You are acting as an expert assistant representing the individual whose resume is provided below.
        Your task is to answer questions strictly based on the information contained in the resume.
        Do not fabricate or assume any details that are not explicitly mentioned in the resume.

        If asked about improvements or suggestions, respond with clear, concise, and focused points only.
        Keep your answers compact and to the point, and expand only if the user explicitly asks for more details.

        If a user asks a question you cannot answer from the resume, use the record_unknown_question tool to log the unanswered query.

        If the user expresses interest in following up or staying in touch, politely ask for their name and email,
        then record it using the record_user_details tool.

        Resume Content:
        {resume_text}
        """
        return system_prompt

    def chat(self, message: str, history: list, resume_text: str):
        """Generate the assistant's reply to ``message``.

        ``history`` is in Gradio's [user, bot] pair format; it is converted
        to OpenAI messages, then the model is invoked in a loop until it
        stops requesting tool calls.
        """
        system_prompt = self.get_system_prompt(resume_text)

        # Convert Gradio pairs to OpenAI role/content messages, skipping the
        # None placeholders Gradio uses for one-sided turns.
        formatted_history = []
        for user_msg, bot_msg in history:
            if user_msg is not None:
                formatted_history.append({"role": "user", "content": user_msg})
            if bot_msg is not None:
                formatted_history.append({"role": "assistant", "content": bot_msg})

        # Full conversation: system prompt, prior turns, then the new message.
        messages = [{"role": "system", "content": system_prompt}] + formatted_history + [{"role": "user", "content": message}]

        done = False  # Flag to track when the chat loop should stop

        while not done:
            response = self.open_ai.chat.completions.create(
                model="gpt-4o-mini",
                messages=messages,
                tools=tools  # let the model invoke the recording tools
            )

            finish_reason = response.choices[0].finish_reason

            if finish_reason == "tool_calls":
                # Run the requested tools and feed their results back so the
                # model can produce a final user-facing answer.
                message_response = response.choices[0].message
                tool_calls = message_response.tool_calls
                results = self.handle_tool_call(tool_calls)
                messages.append(message_response)
                messages.extend(results)
            else:
                # No further tool calls: the last response is the answer.
                done = True

        return response.choices[0].message.content
163
+
164
# Single shared ResumeChatbot instance used by all Gradio callbacks.
chatbot_instance = ResumeChatbot()
166
+
167
def upload_and_process_resume(file_obj):
    """Gradio upload handler: extract text from the uploaded PDF resume.

    Returns a 3-tuple for (resume_text_state, chatbot, msg_box): the
    extracted text (or None on failure), the reset chat history, and the
    textbox value (a prompt/error message, or "" to clear it).
    """
    if file_obj is None:
        return None, [], "Please upload a PDF resume to begin."

    # Bug fix: with gr.File(type="filepath") Gradio passes a plain string
    # path, which has no ``.name`` attribute, so ``file_obj.name`` raised
    # AttributeError on every upload. Accept both a str path and a
    # file-like/tempfile object for robustness across Gradio versions.
    pdf_path = file_obj if isinstance(file_obj, str) else file_obj.name
    resume_text = chatbot_instance.extract_text_from_pdf(pdf_path)

    if resume_text is None or not resume_text.strip():
        return None, [], "Could not read text from the uploaded PDF. Please try another file."

    # Reset the conversation with a bot-only welcome entry; the user slot is
    # None in Gradio's [user, bot] pair format.
    initial_message = "Thank you for uploading the resume. How can I help you today?"
    chat_history = [[None, initial_message]]
    return resume_text, chat_history, ""  # state, updated chatbot, cleared textbox
186
+
187
def respond(message: str, chat_history: list, resume_state: str):
    """Gradio chat handler: append the bot reply for ``message`` to history.

    ``resume_state`` holds the extracted resume text for this session; an
    empty value means no resume has been uploaded yet.
    """
    # Guard clause: without a resume there is nothing to answer from.
    if not resume_state:
        chat_history.append([message, "Please upload a resume before starting the conversation."])
        return "", chat_history

    # Delegate to the chatbot (history stays in Gradio's [user, bot] pair
    # format) and record the new exchange.
    reply = chatbot_instance.chat(message, chat_history, resume_state)
    chat_history.append([message, reply])

    # First output clears the textbox; second refreshes the chat display.
    return "", chat_history
202
+
203
# --- Gradio Interface ---
if __name__ == "__main__":
    with gr.Blocks(theme=gr.themes.Soft(), title="Resume Chatbot") as demo:
        # Per-session state holding the extracted resume text (None until upload).
        resume_text_state = gr.State(None)

        gr.Markdown("# Chat with a Resume")
        gr.Markdown("Upload a PDF resume below, then ask questions about it.")

        with gr.Row():
            with gr.Column(scale=1):
                file_uploader = gr.File(
                    label="Upload PDF Resume",
                    file_types=[".pdf"],
                    # NOTE(review): with type="filepath" Gradio passes a plain
                    # str path to the handler, not a file object — confirm the
                    # upload handler accepts a bare path.
                    type="filepath"
                )
            with gr.Column(scale=2):
                chatbot = gr.Chatbot(label="Conversation", height=500)
                msg_box = gr.Textbox(label="Your Question", placeholder="e.g., What are the key skills mentioned?")
                submit_btn = gr.Button("Send")

        # Upload handler: extracts the resume text, resets the chat, and
        # clears (or fills) the textbox with a status message.
        file_uploader.upload(
            fn=upload_and_process_resume,
            inputs=[file_uploader],
            outputs=[resume_text_state, chatbot, msg_box]
        )

        # Chat handlers: pressing Enter in the textbox or clicking Send both
        # route through respond() with the same inputs/outputs.
        msg_box.submit(
            fn=respond,
            inputs=[msg_box, chatbot, resume_text_state],
            outputs=[msg_box, chatbot]
        )
        submit_btn.click(
            fn=respond,
            inputs=[msg_box, chatbot, resume_text_state],
            outputs=[msg_box, chatbot]
        )

    demo.launch()