Ayush239 committed on
Commit
1a5bf3e
·
verified ·
1 Parent(s): 00eef43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +321 -148
app.py CHANGED
@@ -1,148 +1,321 @@
1
- from dotenv import load_dotenv
2
- from openai import OpenAI
3
- import json
4
- import os
5
- import requests
6
- from pypdf import PdfReader
7
- import gradio as gr
8
-
9
-
10
- load_dotenv(override=True)
11
-
12
def push(text):
    """Send *text* as a Pushover notification.

    Credentials are read from the PUSHOVER_TOKEN / PUSHOVER_USER environment
    variables at call time.
    """
    requests.post(
        "https://api.pushover.net/1/messages.json",
        data={
            "token": os.getenv("PUSHOVER_TOKEN"),
            "user": os.getenv("PUSHOVER_USER"),
            "message": text,
        },
        # requests has no default timeout; without one an unreachable API
        # would block the chat turn that triggered the notification forever.
        timeout=10,
    )
21
-
22
-
23
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Notify the owner that a visitor shared contact details; ack for the model."""
    notification = f"Recording {name} with email {email} and notes {notes}"
    push(notification)
    return {"recorded": "ok"}
26
-
27
def record_unknown_question(question):
    """Log a question the assistant could not answer; ack for the model."""
    notification = f"Recording {question}"
    push(notification)
    return {"recorded": "ok"}
30
-
31
# JSON-schema tool manifest: lets the model capture a visitor's contact info.
# Only "email" is mandatory; name/notes are best-effort extras.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The email address of this user"},
            "name": {"type": "string", "description": "The user's name, if they provided it"},
            "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording to give context"},
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
54
-
55
# JSON-schema tool manifest: lets the model log questions it cannot answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {"type": "string", "description": "The question that couldn't be answered"},
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
70
-
71
# Tool manifest passed to the chat-completions API on every request.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
75
-
76
-
77
class Me:
    """Chat persona that answers as Ayush Tyagi, grounded in a summary file and
    the text of an exported LinkedIn PDF, with tool-calling for lead capture."""

    def __init__(self):
        # Default OpenAI client; reads OPENAI_API_KEY from the environment.
        self.openai = OpenAI()
        self.name = "Ayush Tyagi"
        # Pull all extractable text out of the exported LinkedIn PDF.
        reader = PdfReader("me/Ayush_linkdin.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            # extract_text() can return None for image-only pages.
            if text:
                self.linkedin += text
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_calls):
        """Execute each tool the model requested; return "tool"-role replies."""
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            # Dispatch by name against module-level functions; unknown names
            # yield an empty result rather than raising.
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
        return results

    def system_prompt(self):
        """Build the persona system prompt from the loaded context.

        NOTE(review): rebuilt on every call even though the inputs never
        change after __init__ — harmless, but could be cached.
        """
        system_prompt = f"""
You are acting as {self.name}. Your role is to answer questions on {self.name}'s personal website,
specifically those related to {self.name}'s career, background, skills, and professional experience.

Your responsibility is to represent {self.name} accurately, professionally, and engagingly,
as if you are speaking to a potential client, recruiter, or future employer who is evaluating
{self.name}'s profile. Always communicate with clarity and confidence.

You are provided with a detailed summary of {self.name}'s background and a copy of {self.name}'s LinkedIn profile.
Use this information as your knowledge base when responding. If you do not know something
or the information is not available, politely state that you don't have enough details to answer.

If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career.
If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool.

## Summary:
{self.summary}

## LinkedIn Profile:
{self.linkedin}

Using the above context, engage with users while staying fully in character as {self.name}.
"""
        return system_prompt

    def chat(self, message, history):
        """Gradio ChatInterface callback: loop the model, running tools, until
        it produces a final text reply."""
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
        done = False
        while not done:
            response = self.openai.chat.completions.create(model="qwen/qwen3-next-80b-a3b-thinking", messages=messages, tools=tools)
            if response.choices[0].finish_reason == "tool_calls":
                # NOTE(review): this rebinds the `message` parameter to the
                # model's reply object — confusing but functionally fine here.
                message = response.choices[0].message
                tool_calls = message.tool_calls
                results = self.handle_tool_call(tool_calls)
                messages.append(message)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content
144
-
145
-
146
- if __name__ == "__main__":
147
- me = Me()
148
- gr.ChatInterface(me.chat, type="messages").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ from openai import OpenAI
4
+ import os
5
+ import json
6
+ import requests
7
+ from pypdf import PdfReader
8
+ import gradio as gr
9
+ import traceback
10
+
11
# ---------------------------
# Configuration (environment)
# ---------------------------
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")  # nvapi-... (NVIDIA NIM)
# NVIDIA NIM compatible base URL:
OPENAI_BASE_URL = "https://integrate.api.nvidia.com/v1"

# Pushover (optional) — when either value is missing, push() degrades to printing.
PUSHOVER_TOKEN = os.environ.get("PUSHOVER_TOKEN")
PUSHOVER_USER = os.environ.get("PUSHOVER_USER")

# Model to call on NVIDIA NIM
MODEL = "qwen/qwen2-7b-instruct"

# Initialize OpenAI client (NIM via OpenAI-compatible client).
# The OpenAI Python client accepts api_key and base_url in its constructor.
# NOTE: constructed at import time, so a missing key only fails on first call.
oa_client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
28
+
29
# ---------------------------
# Utility: push notifications
# ---------------------------
def push(text: str):
    """Deliver *text* as a Pushover notification, best-effort.

    When credentials are absent the message is printed instead; delivery
    errors are logged but never propagated to the caller.
    """
    if not PUSHOVER_TOKEN or not PUSHOVER_USER:
        print("Pushover not configured - message would be:", text)
        return
    payload = {
        "token": PUSHOVER_TOKEN,
        "user": PUSHOVER_USER,
        "message": text,
    }
    try:
        resp = requests.post(
            "https://api.pushover.net/1/messages.json",
            data=payload,
            timeout=10,
        )
        if resp.status_code != 200:
            print("Pushover returned", resp.status_code, resp.text)
    except Exception as e:
        print("Failed sending pushover:", e)
54
+
55
# ---------------------------
# Tools definitions (JSON schemas)
# ---------------------------
# Schema for capturing a visitor's contact details; only "email" is required.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if provided",
            },
            "notes": {
                "type": "string",
                "description": "Any additional info about the conversation",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
72
+
73
# Schema for logging questions the assistant could not answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Record any question that couldn't be answered",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
85
+
86
# Tool manifest handed to the model on every completion request.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
90
+
91
# ---------------------------
# Local tools (callable functions)
# ---------------------------
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Record a visitor's contact details: notify via Pushover, log, and ack.

    Returns a JSON-serialisable dict so the model receives tool feedback.
    """
    push(f"Recording {name} with email {email} and notes {notes}")
    print(f"[TOOL] record_user_details: email={email} name={name} notes={notes}", flush=True)
    # Here you might append to a DB / google sheet / file. For Space demo we just return ok.
    return {"recorded": "ok", "email": email, "name": name, "notes": notes}

def record_unknown_question(question):
    """Record a question the assistant could not answer; notify, log, and ack."""
    push(f"Recording unknown question: {question}")
    print(f"[TOOL] record_unknown_question: {question}", flush=True)
    return {"recorded": "ok", "question": question}

# The previous explicit `globals()["record_user_details"] = ...` registration
# was removed: a top-level `def` already binds the name in module globals,
# which is exactly the namespace Me.handle_tool_call dispatches against.
108
+
109
# ---------------------------
# The assistant wrapper
# ---------------------------
class Me:
    """Persona wrapper: answers as Ayush Tyagi, grounded in optional context
    files, with tool-calling support over an OpenAI-compatible backend."""

    # Cap on model round-trips per user turn, so a model that keeps
    # requesting tools cannot spin the old unbounded loop forever.
    MAX_TOOL_ROUNDS = 8

    def __init__(self):
        self.name = "Ayush Tyagi"
        # Fallbacks keep the app usable when the context files are absent.
        self.linkedin = ""
        self.summary = "Summary not provided."
        try:
            pdf_path = "me/Ayush_linkdin.pdf"
            if os.path.exists(pdf_path):
                reader = PdfReader(pdf_path)
                pages = []
                for page in reader.pages:
                    page_text = page.extract_text()
                    if page_text:  # extract_text() may return None for image-only pages
                        pages.append(page_text)
                self.linkedin = "\n\n".join(pages).strip()
            else:
                print(f"{pdf_path} not found in repo; skipping PDF loading.")
        except Exception as e:
            print("Error loading PDF:", e)
            traceback.print_exc()

        try:
            summary_path = "me/summary.txt"
            if os.path.exists(summary_path):
                with open(summary_path, "r", encoding="utf-8") as f:
                    self.summary = f.read()
            else:
                print(f"{summary_path} not found in repo; using fallback summary.")
        except Exception as e:
            print("Error reading summary.txt:", e)
            traceback.print_exc()

        # Prebuild the system prompt; the context never changes at runtime.
        self._system_prompt = self._build_system_prompt()

    def _build_system_prompt(self):
        """Compose the persona system prompt from the loaded context."""
        sp = f"""You are acting as {self.name}. Your role is to answer questions on {self.name}'s personal website,
specifically those related to {self.name}'s career, background, skills, and professional experience.
Represent {self.name} accurately, professionally and engagingly.

If you don't know the answer to any question, say you don't know and use the record_unknown_question tool
to record the question. If the user wants to stay in touch, ask for their email and use the record_user_details tool.

## Summary:
{self.summary}

## LinkedIn (extracted text, if available):
{self.linkedin}
"""
        return sp

    def system_prompt(self):
        """Return the prebuilt system prompt."""
        return self._system_prompt

    @staticmethod
    def _extract_call(tool_call):
        """Return (name, raw_arguments, call_id) from a tool call of any shape.

        Supports both plain dicts and SDK objects (tool_call.function.name).
        BUG FIX: the old precedence-broken expression
        `getattr(func, "name", None) or func.get("name") if isinstance(func, dict) else func_name`
        parsed as a conditional whose else-branch kept the name None, so
        attribute-style tool calls were silently skipped; it also never read
        `id` off non-dict tool calls.
        """
        if isinstance(tool_call, dict):
            func = tool_call.get("function") or {}
            name = func.get("name") or tool_call.get("name")
            raw_args = func.get("arguments") or tool_call.get("arguments")
            return name, raw_args, tool_call.get("id")
        func = getattr(tool_call, "function", None)
        if isinstance(func, dict):
            name = func.get("name")
            raw_args = func.get("arguments")
        else:
            name = getattr(func, "name", None)
            raw_args = getattr(func, "arguments", None)
        if raw_args is None:
            raw_args = getattr(tool_call, "arguments", None)
        return name, raw_args, getattr(tool_call, "id", None)

    @staticmethod
    def _parse_args(raw):
        """Decode tool arguments: accept a dict, a JSON string, or nothing.

        SECURITY FIX: the old code fell back to eval() on the model-supplied
        string, executing untrusted input; unparseable arguments now simply
        yield an empty dict.
        """
        if isinstance(raw, dict):
            return raw
        if isinstance(raw, str) and raw.strip():
            try:
                return json.loads(raw)
            except ValueError:
                print("Could not parse tool arguments as JSON:", raw)
        return {}

    def handle_tool_call(self, tool_calls):
        """Run each tool requested by the model and build "tool"-role replies.

        Each element of *tool_calls* may be an SDK object or a dict. Unknown
        tool names yield an empty result; per-call errors are reported back
        to the model rather than raised.
        """
        results = []
        for tool_call in tool_calls:
            try:
                func_name, raw_args, call_id = self._extract_call(tool_call)
                if not func_name:
                    print("Could not determine tool name for tool_call:", tool_call)
                    continue
                args = self._parse_args(raw_args)
                print(f"Tool called: {func_name} with args: {args}", flush=True)
                tool = globals().get(func_name)
                if callable(tool):
                    result = tool(**args)
                else:
                    print(f"No tool function found for {func_name}")
                    result = {}
                results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": call_id})
            except Exception as e:
                print("Error during tool handling:", e)
                traceback.print_exc()
                results.append({"role": "tool", "content": json.dumps({"error": str(e)}), "tool_call_id": None})
        return results

    @staticmethod
    def _normalize_history(history):
        """Convert Gradio history (pair-lists or role/content dicts) to chat messages."""
        converted = []
        try:
            if history:
                first = history[0]
                if isinstance(first, (list, tuple)) and len(first) == 2:
                    for user_msg, assistant_msg in history:
                        if user_msg:
                            converted.append({"role": "user", "content": user_msg})
                        if assistant_msg:
                            converted.append({"role": "assistant", "content": assistant_msg})
                else:
                    for item in history:
                        if isinstance(item, dict) and "role" in item and "content" in item:
                            converted.append({"role": item["role"], "content": item["content"]})
        except Exception as e:
            print("Failed to normalize history:", e)
            traceback.print_exc()
        return converted

    @staticmethod
    def _choice_parts(choice0):
        """Pull (finish_reason, message, tool_calls) from a dict or SDK choice."""
        if isinstance(choice0, dict):
            finish = choice0.get("finish_reason")
            msg = choice0.get("message", {})
        else:
            finish = getattr(choice0, "finish_reason", None)
            msg = getattr(choice0, "message", None)
        if isinstance(msg, dict):
            calls = msg.get("tool_calls") or []
        else:
            calls = getattr(msg, "tool_calls", None) or []
        return finish, msg, calls

    def chat(self, message, history):
        """Gradio ChatInterface-compatible callback: (message, history) -> str.

        Builds the message list, then loops the model — executing any tool
        calls — until it produces a final text reply or the round cap hits.
        """
        messages = [{"role": "system", "content": self.system_prompt()}]
        messages.extend(self._normalize_history(history))
        messages.append({"role": "user", "content": message})

        last_response_text = "Sorry — something went wrong."
        try:
            for _ in range(self.MAX_TOOL_ROUNDS):
                response = oa_client.chat.completions.create(
                    model=MODEL,
                    messages=messages,
                    tools=tools,
                    max_tokens=512,
                )
                choice0 = response.choices[0]
                finish_reason, model_message, tool_calls = self._choice_parts(choice0)
                if finish_reason == "tool_calls" or tool_calls:
                    results = self.handle_tool_call(tool_calls)
                    # BUG FIX: append the model's message as-is so its
                    # `tool_calls` field survives; the old code rebuilt a bare
                    # {"role","content"} dict, which makes the following
                    # "tool" messages invalid for OpenAI-compatible APIs.
                    messages.append(model_message)
                    messages.extend(results)
                else:
                    if isinstance(model_message, dict):
                        content = model_message.get("content")
                    else:
                        content = getattr(model_message, "content", None)
                    if not content:
                        # Some clients put the text at choice0.text instead.
                        content = getattr(choice0, "text", None) or (choice0.get("text") if isinstance(choice0, dict) else "")
                    last_response_text = content or " (no content returned by model) "
                    break
        except Exception as e:
            print("Error calling model:", e)
            traceback.print_exc()
            last_response_text = "Sorry, the model call failed. Check logs in Space build/runtime for details."

        return last_response_text
308
+
309
# ---------------------------
# Instantiate and run Gradio
# ---------------------------
me = Me()  # built at import time so Spaces can serve immediately

# Gradio ChatInterface (simpler)
# type="messages" makes Gradio deliver history as role/content dicts,
# which Me.chat normalizes before calling the model.
iface = gr.ChatInterface(fn=me.chat, type="messages", title="Ayush Tyagi — Personal Assistant")

if __name__ == "__main__":
    # In Spaces, it's recommended to bind to 0.0.0.0 and use the PORT envvar if provided.
    server_name = "0.0.0.0"
    server_port = int(os.environ.get("PORT", 7860))
    iface.launch(server_name=server_name, server_port=server_port)