Ayush239 commited on
Commit
e7e5eff
·
verified ·
1 Parent(s): 1a5bf3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +156 -273
app.py CHANGED
@@ -1,44 +1,36 @@
 
 
 
1
 
2
-
3
- from openai import OpenAI
4
  import os
5
  import json
6
  import requests
7
  from pypdf import PdfReader
 
8
  import gradio as gr
9
- import traceback
10
 
11
- # ---------------------------
12
- # Configuration (environment)
13
- # ---------------------------
14
- OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") # nvapi-... (NVIDIA NIM)
15
- # NVIDIA NIM compatible base URL:
16
- OPENAI_BASE_URL = "https://integrate.api.nvidia.com/v1"
17
 
18
- # Pushover (optional)
19
  PUSHOVER_TOKEN = os.environ.get("PUSHOVER_TOKEN")
20
  PUSHOVER_USER = os.environ.get("PUSHOVER_USER")
21
 
22
- # Model to call on NVIDIA NIM
23
- MODEL = "qwen/qwen2-7b-instruct"
24
-
25
- # Initialize OpenAI client (NIM via OpenAI-compatible client)
26
- # The OpenAI Python client accepts api_key and base_url in constructor.
27
- oa_client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
28
-
29
- # ---------------------------
30
- # Utility: push notifications
31
- # ---------------------------
32
- def push(text: str):
33
- """
34
- Send a Pushover notification if credentials are available.
35
- If not available, just print to stdout (no failure).
36
- """
37
  try:
38
  if not PUSHOVER_TOKEN or not PUSHOVER_USER:
39
- print("Pushover not configured - message would be:", text)
40
  return
41
- resp = requests.post(
42
  "https://api.pushover.net/1/messages.json",
43
  data={
44
  "token": PUSHOVER_TOKEN,
@@ -47,275 +39,166 @@ def push(text: str):
47
  },
48
  timeout=10
49
  )
50
- if resp.status_code != 200:
51
- print("Pushover returned", resp.status_code, resp.text)
52
  except Exception as e:
53
- print("Failed sending pushover:", e)
54
-
55
- # ---------------------------
56
- # Tools definitions (JSON schemas)
57
- # ---------------------------
58
- record_user_details_json = {
59
- "name": "record_user_details",
60
- "description": "Record that a user is interested in being in touch and provided an email address",
61
- "parameters": {
62
- "type": "object",
63
- "properties": {
64
- "email": {"type": "string", "description": "The email address of this user"},
65
- "name": {"type": "string", "description": "The user's name, if provided"},
66
- "notes": {"type": "string", "description": "Any additional info about the conversation"}
67
- },
68
- "required": ["email"],
69
- "additionalProperties": False
70
- }
71
- }
72
-
73
- record_unknown_question_json = {
74
- "name": "record_unknown_question",
75
- "description": "Record any question that couldn't be answered",
76
- "parameters": {
77
- "type": "object",
78
- "properties": {
79
- "question": {"type": "string", "description": "The question that couldn't be answered"}
80
- },
81
- "required": ["question"],
82
- "additionalProperties": False
83
- }
84
- }
85
 
86
- tools = [
87
- {"type": "function", "function": record_user_details_json},
88
- {"type": "function", "function": record_unknown_question_json}
89
- ]
90
 
91
- # ---------------------------
92
- # Local tools (callable functions)
93
- # ---------------------------
94
  def record_user_details(email, name="Name not provided", notes="not provided"):
95
- push(f"Recording {name} with email {email} and notes {notes}")
96
- print(f"[TOOL] record_user_details: email={email} name={name} notes={notes}", flush=True)
97
- # Here you might append to a DB / google sheet / file. For Space demo we just return ok.
98
- return {"recorded": "ok", "email": email, "name": name, "notes": notes}
99
 
100
  def record_unknown_question(question):
101
- push(f"Recording unknown question: {question}")
102
- print(f"[TOOL] record_unknown_question: {question}", flush=True)
103
- return {"recorded": "ok", "question": question}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
- # Register tool functions in globals so they can be invoked by name
106
  globals()["record_user_details"] = record_user_details
107
  globals()["record_unknown_question"] = record_unknown_question
108
 
109
- # ---------------------------
110
- # The assistant wrapper
111
- # ---------------------------
 
112
  class Me:
113
  def __init__(self):
114
  self.name = "Ayush Tyagi"
115
- # load LinkedIn pdf and summary (if present)
116
- self.linkedin = ""
117
- self.summary = "Summary not provided."
118
- try:
119
- pdf_path = "me/Ayush_linkdin.pdf"
120
- if os.path.exists(pdf_path):
121
- reader = PdfReader(pdf_path)
122
- text = []
123
- for page in reader.pages:
124
- page_text = page.extract_text()
125
- if page_text:
126
- text.append(page_text)
127
- self.linkedin = "\n\n".join(text).strip()
128
- else:
129
- print(f"{pdf_path} not found in repo; skipping PDF loading.")
130
- except Exception as e:
131
- print("Error loading PDF:", e)
132
- traceback.print_exc()
133
-
134
- try:
135
- summary_path = "me/summary.txt"
136
- if os.path.exists(summary_path):
137
- with open(summary_path, "r", encoding="utf-8") as f:
138
- self.summary = f.read()
139
- else:
140
- print(f"{summary_path} not found in repo; using fallback summary.")
141
- except Exception as e:
142
- print("Error reading summary.txt:", e)
143
- traceback.print_exc()
144
-
145
- # Prebuild the system prompt (keeps it simple & safe)
146
- self._system_prompt = self._build_system_prompt()
147
-
148
- def _build_system_prompt(self):
149
- sp = f"""You are acting as {self.name}. Your role is to answer questions on {self.name}'s personal website,
150
- specifically those related to {self.name}'s career, background, skills, and professional experience.
151
- Represent {self.name} accurately, professionally and engagingly.
152
-
153
- If you don't know the answer to any question, say you don't know and use the record_unknown_question tool
154
- to record the question. If the user wants to stay in touch, ask for their email and use the record_user_details tool.
155
-
156
- ## Summary:
157
  {self.summary}
158
 
159
- ## LinkedIn (extracted text, if available):
160
- {self.linkedin}
161
  """
162
- return sp
163
-
164
- def system_prompt(self):
165
- return self._system_prompt
166
-
167
- def handle_tool_call(self, tool_calls):
168
- """
169
- Accepts a list of tool call objects returned by the model (tool_calls).
170
- Each tool_call is expected to have:
171
- - function.name (string)
172
- - function.arguments (JSON string)
173
- - id (optional)
174
- """
175
- results = []
176
- for tool_call in tool_calls:
177
- try:
178
- # Different client shapes exist; be defensive
179
- func_name = None
180
- func_args_json = None
181
- call_id = None
182
-
183
- # attempt several shapes
184
- if hasattr(tool_call, "function"):
185
- # tool_call.function may be a simple namespace with .name and .arguments
186
- func = tool_call.function
187
- func_name = getattr(func, "name", None) or func.get("name") if isinstance(func, dict) else func_name
188
- func_args_json = getattr(func, "arguments", None) or (func.get("arguments") if isinstance(func, dict) else None)
189
- # fallback for dict-like
190
- if not func_name and isinstance(tool_call, dict):
191
- func_name = tool_call.get("function", {}).get("name") or tool_call.get("name")
192
- func_args_json = tool_call.get("function", {}).get("arguments") or tool_call.get("arguments")
193
- call_id = tool_call.get("id")
194
-
195
- # also check top-level
196
- if not func_args_json:
197
- func_args_json = getattr(tool_call, "arguments", None) or tool_call.get("arguments") if isinstance(tool_call, dict) else func_args_json
198
-
199
- if not func_name:
200
- print("Could not determine tool name for tool_call:", tool_call)
201
- continue
202
-
203
- # parse JSON args (models often return JSON-string)
204
- args = {}
205
- if func_args_json:
206
- if isinstance(func_args_json, str):
207
- try:
208
- args = json.loads(func_args_json)
209
- except Exception:
210
- # sometimes arguments come as dict already
211
- try:
212
- args = eval(func_args_json)
213
- except Exception:
214
- args = {}
215
- elif isinstance(func_args_json, dict):
216
- args = func_args_json
217
-
218
- print(f"Tool called: {func_name} with args: {args}", flush=True)
219
- tool = globals().get(func_name)
220
- if callable(tool):
221
- result = tool(**args)
222
- else:
223
- print(f"No tool function found for {func_name}")
224
- result = {}
225
- # build tool result entry in a shape compatible with continuing the chat
226
- result_content = json.dumps(result)
227
- results.append({"role": "tool", "content": result_content, "tool_call_id": call_id})
228
- except Exception as e:
229
- print("Error during tool handling:", e)
230
- traceback.print_exc()
231
- results.append({"role": "tool", "content": json.dumps({"error": str(e)}), "tool_call_id": None})
232
- return results
233
 
 
 
 
234
  def chat(self, message, history):
235
- """
236
- Gradio ChatInterface-compatible function: (message, history) -> str
237
- history is a list of tuples (user, assistant) or a list of message dicts depending on Gradio version.
238
- We'll convert to a message list compatible with the model.
239
- """
240
- # Build messages array (OpenAI chat format)
241
- # Start with system prompt
242
  messages = [{"role": "system", "content": self.system_prompt()}]
243
 
244
- # Convert gradio history into assistant/user messages robustly:
245
- # history can be list of pairs [[user, bot], ...] or list of dict messages
246
- try:
247
- if history:
248
- # If history items are tuples/lists of length 2
249
- if isinstance(history, list) and len(history) and isinstance(history[0], (list, tuple)) and len(history[0]) == 2:
250
- for user_msg, assistant_msg in history:
251
- if user_msg:
252
- messages.append({"role": "user", "content": user_msg})
253
- if assistant_msg:
254
- messages.append({"role": "assistant", "content": assistant_msg})
255
- else:
256
- # Fallback: if history is list of dicts with role/content
257
- for item in history:
258
- if isinstance(item, dict) and "role" in item and "content" in item:
259
- messages.append({"role": item["role"], "content": item["content"]})
260
- except Exception as e:
261
- print("Failed to normalize history:", e)
262
- traceback.print_exc()
263
-
264
- # Add the latest user message
265
  messages.append({"role": "user", "content": message})
266
 
267
- # Loop to support tool-calls (the client may return tool_calls finish_reason)
268
- done = False
269
- last_response_text = "Sorry — something went wrong."
270
- try:
271
- while not done:
272
- # Call the model
273
- response = oa_client.chat.completions.create(
274
- model=MODEL,
275
- messages=messages,
276
- tools=tools,
277
- max_tokens=512
278
- )
279
- # defensive extraction
280
- choice0 = response.choices[0]
281
- finish_reason = getattr(choice0, "finish_reason", None) or (choice0.get("finish_reason") if isinstance(choice0, dict) else None)
282
- # If model asks to call tools:
283
- if finish_reason == "tool_calls" or getattr(choice0, "message", None) and getattr(choice0.message, "tool_calls", None):
284
- # extract the message object
285
- model_message = getattr(choice0, "message", None) or choice0.get("message", {})
286
- tool_calls = getattr(model_message, "tool_calls", None) or model_message.get("tool_calls", [])
287
- # handle tool calls
288
- results = self.handle_tool_call(tool_calls)
289
- # append the model message (which requested tool calls) and the tool result messages
290
- messages.append(model_message if isinstance(model_message, dict) else {"role": "assistant", "content": getattr(model_message, "content", "")})
291
- messages.extend(results)
292
- else:
293
- # final content
294
- msg_obj = getattr(choice0, "message", None) or choice0.get("message", {})
295
- content = getattr(msg_obj, "content", None) or (msg_obj.get("content") if isinstance(msg_obj, dict) else None)
296
- if not content:
297
- # Some clients put the text at choice0.text or similar
298
- content = getattr(choice0, "text", None) or (choice0.get("text") if isinstance(choice0, dict) else "")
299
- last_response_text = content or " (no content returned by model) "
300
- done = True
301
- except Exception as e:
302
- # Log and return a helpful message
303
- print("Error calling model:", e)
304
- traceback.print_exc()
305
- last_response_text = "Sorry, the model call failed. Check logs in Space build/runtime for details."
306
-
307
- return last_response_text
308
-
309
- # ---------------------------
310
- # Instantiate and run Gradio
311
- # ---------------------------
312
  me = Me()
313
 
314
- # Gradio ChatInterface (simpler)
315
- iface = gr.ChatInterface(fn=me.chat, type="messages", title="Ayush Tyagi — Personal Assistant")
 
 
 
316
 
317
  if __name__ == "__main__":
318
- # In Spaces, it's recommended to bind to 0.0.0.0 and use the PORT envvar if provided.
319
- server_name = "0.0.0.0"
320
- server_port = int(os.environ.get("PORT", 7860))
321
- iface.launch(server_name=server_name, server_port=server_port)
 
1
+ # app.py — NVIDIA NIM + Tool Calling + Gradio Chatbot
2
+ # Model: meta/llama3-8b-instruct (supports OpenAI-style tools)
3
+ # Works on HuggingFace Spaces with your nvapi-... key.
4
 
 
 
5
  import os
6
  import json
7
  import requests
8
  from pypdf import PdfReader
9
+ from openai import OpenAI
10
  import gradio as gr
 
11
 
12
# ===============================
# ENVIRONMENT CONFIG
# ===============================
# NVIDIA NIM exposes an OpenAI-compatible endpoint; the credential is the
# nvapi-... key supplied through the OPENAI_API_KEY environment variable.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")  # Your nvapi-XXXX key
BASE_URL = "https://integrate.api.nvidia.com/v1"
MODEL = "meta/llama3-8b-instruct"

# Optional Pushover credentials; push() degrades to stdout logging when unset.
PUSHOVER_TOKEN = os.environ.get("PUSHOVER_TOKEN")
PUSHOVER_USER = os.environ.get("PUSHOVER_USER")

# OpenAI client pointed at the NIM-compatible base URL.
# NOTE(review): constructed at import time — if OPENAI_API_KEY is unset the
# OpenAI client may raise on construction; confirm the Space always sets it.
client = OpenAI(api_key=OPENAI_API_KEY, base_url=BASE_URL)
23
+
24
+
25
# ===============================
# OPTIONAL: Pushover notification
# ===============================
def push(text):
    """Send a Pushover notification; log to stdout when unconfigured.

    Never raises: any configuration or network failure is caught and printed
    so a notification problem cannot take down the chat handler.
    """
    try:
        if not PUSHOVER_TOKEN or not PUSHOVER_USER:
            print("Pushover not configured:", text)
            return
        # NOTE(review): the "user"/"message" fields were truncated in the
        # diff view; reconstructed from the Pushover messages API contract.
        resp = requests.post(
            "https://api.pushover.net/1/messages.json",
            data={
                "token": PUSHOVER_TOKEN,
                "user": PUSHOVER_USER,
                "message": text,
            },
            timeout=10
        )
        # Surface API-side rejections (bad token, rate limit) instead of
        # silently dropping them — the previous revision logged these too.
        if resp.status_code != 200:
            print("Pushover returned", resp.status_code, resp.text)
    except Exception as e:
        print("Pushover failed:", e)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
 
 
 
 
45
 
46
# ===============================
# TOOL IMPLEMENTATIONS
# ===============================
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool implementation: record that a user wants to stay in touch.

    Fires a best-effort Pushover notification and returns a confirmation
    payload that is serialized back to the model as the tool result.
    """
    push(f"New lead → {name} | {email} | Notes: {notes}")
    confirmation = {"status": "ok", "email": email, "name": name, "notes": notes}
    return confirmation
 
 
52
 
53
def record_unknown_question(question):
    """Tool implementation: log a question the assistant could not answer.

    Notifies via Pushover (best-effort) and echoes the question back as the
    tool result so the model can acknowledge it was recorded.
    """
    push(f"Unknown question recorded: {question}")
    receipt = {"status": "ok", "question": question}
    return receipt
56
+
57
+
58
# ===============================
# TOOL JSON DEFINITIONS
# ===============================
# OpenAI-style function schemas advertised to the model. Property
# descriptions restored (the previous revision carried them) so the model
# fills arguments correctly; additionalProperties=False keeps the model's
# arguments strictly within the declared schema.
tools = [
    {
        "type": "function",
        "function": {
            "name": "record_user_details",
            "description": "Record user interest and their email.",
            "parameters": {
                "type": "object",
                "properties": {
                    "email": {"type": "string", "description": "The email address of this user"},
                    "name": {"type": "string", "description": "The user's name, if provided"},
                    "notes": {"type": "string", "description": "Any additional info about the conversation"}
                },
                "required": ["email"],
                "additionalProperties": False
            }
        },
    },
    {
        "type": "function",
        "function": {
            "name": "record_unknown_question",
            "description": "Record any question the assistant could not answer.",
            "parameters": {
                "type": "object",
                "properties": {
                    "question": {"type": "string", "description": "The question that couldn't be answered"}
                },
                "required": ["question"],
                "additionalProperties": False
            }
        }
    }
]
93
 
 
94
# Explicit dispatch registration: chat() resolves tool calls by name via
# globals().get(name). Module-level defs are already in globals(), so this
# is a readability marker rather than a functional requirement.
for _tool_fn in (record_user_details, record_unknown_question):
    globals()[_tool_fn.__name__] = _tool_fn
96
 
97
+
98
# ===============================
# MAIN ASSISTANT CLASS
# ===============================
class Me:
    """Chat persona wrapper: loads profile data and serves a tool-calling chat loop."""

    # Cap on model round-trips per user message so a model that keeps
    # requesting tools cannot spin forever (original used `while True`).
    MAX_TOOL_ROUNDS = 5

    def __init__(self):
        self.name = "Ayush Tyagi"
        self.summary = ""
        self.linkedin_text = ""

        # Optional profile summary shipped alongside the app.
        if os.path.exists("me/summary.txt"):
            # Context manager so the file handle is closed promptly
            # (original used bare open(...).read(), leaking the handle).
            with open("me/summary.txt", "r", encoding="utf-8") as f:
                self.summary = f.read()

        # Optional LinkedIn export (PDF), extracted page by page.
        pdf_path = "me/Ayush_linkdin.pdf"
        if os.path.exists(pdf_path):
            pages = []
            reader = PdfReader(pdf_path)
            for page in reader.pages:
                page_text = page.extract_text()
                if page_text:
                    pages.append(page_text)
            self.linkedin_text = "\n\n".join(pages)

    def system_prompt(self):
        """Build the system prompt embedding the loaded profile data."""
        return f"""
You are acting as {self.name}. Answer questions professionally about Ayush's skills,
career, background, and experience.

If you DON'T know something → call the tool:
- record_unknown_question

If the user shows interest → ask for an email and call:
- record_user_details

Be polite, confident, friendly and helpful.

### Summary:
{self.summary}

### LinkedIn Data:
{self.linkedin_text}
"""

    # ===============================
    # CHAT LOOP WITH TOOL CALLING
    # ===============================
    def chat(self, message, history):
        """Gradio ChatInterface callback: (message, history) -> reply string.

        Accepts both Gradio history formats: "messages" (list of
        {'role','content'} dicts — what ChatInterface(type='messages')
        actually delivers) and the legacy list of (user, bot) pairs. The
        original unconditionally unpacked items as pairs, which, for the
        dict format, injected the key strings 'role'/'content' as chat turns.
        """
        messages = [{"role": "system", "content": self.system_prompt()}]

        # Normalize history into OpenAI chat format.
        for item in history or []:
            if isinstance(item, dict):
                role = item.get("role")
                content = item.get("content")
                if role in ("user", "assistant") and content:
                    messages.append({"role": role, "content": content})
            elif isinstance(item, (list, tuple)) and len(item) == 2:
                user_msg, bot_msg = item
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if bot_msg:
                    messages.append({"role": "assistant", "content": bot_msg})

        messages.append({"role": "user", "content": message})

        for _ in range(self.MAX_TOOL_ROUNDS):
            response = client.chat.completions.create(
                model=MODEL,
                messages=messages,
                tools=tools,
                tool_choice="auto",
                max_tokens=600
            )

            choice = response.choices[0]
            msg = choice.message

            # ---- TOOL CALL ----
            if choice.finish_reason == "tool_calls" and msg.tool_calls:
                # Append the assistant turn that requested the tools ONCE
                # (original appended it inside the per-call loop, duplicating
                # it when multiple tools were requested). model_dump()
                # replaces the deprecated pydantic-v1 .dict().
                messages.append(msg.model_dump(exclude_none=True))
                for tool_call in msg.tool_calls:
                    fn_name = tool_call.function.name
                    # Model-produced arguments may be malformed JSON; fail soft.
                    try:
                        args = json.loads(tool_call.function.arguments or "{}")
                    except json.JSONDecodeError:
                        args = {}
                    tool_fn = globals().get(fn_name)
                    if callable(tool_fn):
                        result = tool_fn(**args)
                    else:
                        result = {"error": f"unknown tool {fn_name}"}
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": json.dumps(result)
                    })
                continue  # loop again and let the model use the tool results

            # ---- NORMAL RESPONSE ----
            return msg.content or ""

        # Tool-call budget exhausted without a final answer.
        return "Sorry — I couldn't complete that request; please try again."
190
+
191
+
192
# ===============================
# GRADIO APP
# ===============================
me = Me()

ui = gr.ChatInterface(
    fn=me.chat,
    title="Ayush Tyagi — Personal Assistant",
    type="messages"
)

if __name__ == "__main__":
    # HF Spaces supplies PORT; bind to all interfaces inside the container.
    port = int(os.environ.get("PORT", 7860))
    ui.launch(server_name="0.0.0.0", server_port=port)