import json
import os

import requests
from openai import OpenAI
import gradio as gr

# Optional PDF reading: pypdf may not be installed in every deployment.
try:
    from pypdf import PdfReader
    pdf_available = True
except ImportError:
    pdf_available = False

# ---------------------------
# Secrets from Hugging Face
# ---------------------------
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PUSHOVER_USER = os.getenv("PUSHOVER_USER")
PUSHOVER_TOKEN = os.getenv("PUSHOVER_TOKEN")
PUSHOVER_URL = "https://api.pushover.net/1/messages.json"

if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY not found. Set it in Space Secrets.")

# Initialize OpenAI client
openai = OpenAI(api_key=OPENAI_API_KEY)


# ---------------------------
# Push notification helper
# ---------------------------
def push(message):
    """Send a best-effort Pushover notification; skipped when creds are absent."""
    print(f"Push: {message}")
    if PUSHOVER_USER and PUSHOVER_TOKEN:
        payload = {"user": PUSHOVER_USER, "token": PUSHOVER_TOKEN, "message": message}
        # Timeout so a slow/unreachable Pushover API cannot stall a chat turn.
        requests.post(PUSHOVER_URL, data=payload, timeout=10)


# ---------------------------
# Tool functions
# ---------------------------
def connect(email, name="no name", notes="no notes"):
    """Record a visitor's contact request via push notification."""
    push(f'Someone wants to connect with email: {email}, name: {name}, notes: {notes}')
    return {"recorded": "ok"}


def unknown(message):
    """Record a question the assistant could not answer via push notification."""
    push(f'The question I could not answer was: {message}')
    return {"recorded": "ok"}


# Tool JSON schemas
connect_json = {
    "name": "connect",
    "description": "Use this tool when someone wants to connect",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "Email of person wanting to connect"},
            "name": {"type": "string", "description": "Name of person wanting to connect"},
            "notes": {"type": "string", "description": "Additional notes"}
        },
        "required": ["email"],
        "additionalProperties": False
    }
}

unknown_json = {
    "name": "unknown",
    "description": "Use this tool when you don't know the answer to a question",
    "parameters": {
        "type": "object",
        "properties": {
            "message": {"type": "string", "description": "The question asked by the user"}
        },
        "required": ["message"],
        "additionalProperties": False
    }
}

tools = [
    {"type": "function", "function": connect_json},
    {"type": "function", "function": unknown_json},
]


def handle_tool_calls(tool_calls):
    """Execute each requested tool call and return tool-role result messages.

    Each entry in the returned list carries the tool's JSON result plus the
    tool_call_id the OpenAI API requires to pair results with requests.
    """
    results = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        print(f'Tool called: {tool_name}')
        arguments = json.loads(tool_call.function.arguments)
        if tool_name == "unknown":
            result = unknown(**arguments)
        elif tool_name == "connect":
            result = connect(**arguments)
        else:
            # Fix: an unrecognized tool name previously left `result` unbound
            # (NameError). Report the problem back to the model instead.
            result = {"error": f"unrecognized tool: {tool_name}"}
        results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
    return results


# ---------------------------
# Load PDF & summary (optional)
# ---------------------------
linkedin = ""
summary = ""

if pdf_available:
    try:
        reader = PdfReader("me/Profile.pdf")
        linkedin = "".join(page.extract_text() or "" for page in reader.pages)
    except FileNotFoundError:
        linkedin = "LinkedIn profile content not available."

# Fix: the summary text has no pypdf dependency, so load it unconditionally
# rather than only when pypdf is installed.
try:
    with open("me/summary.txt", "r", encoding="utf-8") as f:
        summary = f.read()
except FileNotFoundError:
    summary = "Summary content not available."

# ---------------------------
# System prompt
# ---------------------------
name = "Zalaid Butt"

# Fix: the prompt previously told the model to use tools named
# "record_unknown_question" / "record_user_details", which do not exist —
# the registered tools are "unknown" and "connect".
system_prompt = f"""
If you don't know the answer, just say you don't know.
You are acting as {name}. You are answering questions on {name}'s website,
particularly related to {name}'s career, background, skills and experience.
Be professional and engaging, as if talking to a potential client or future employer.
If you don't know the answer, use the unknown tool.
If the user wants to connect, use the connect tool.
"""
system_prompt += f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
system_prompt += f"With this context, please chat with the user, always staying in character as {name}. "
system_prompt += "Always be concise; do not give any additional information if not asked. " \
    "If you don't know the answer, use the tool but don't give wrong/fake answers."
# ---------------------------
# Chat function
# ---------------------------
def chat(message, history):
    """Run one chat turn, looping until the model stops requesting tools."""
    messages = [{"role": "system", "content": system_prompt}]
    messages += history
    messages.append({"role": "user", "content": message})

    while True:
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            tools=tools,
        )
        choice = response.choices[0]
        if choice.finish_reason != "tool_calls":
            return choice.message.content
        # The model asked for tools: echo its request message, execute the
        # tools, append their results, then let the model continue.
        messages.append(choice.message)
        messages.extend(handle_tool_calls(choice.message.tool_calls))


# ---------------------------
# Launch Gradio
# ---------------------------
gr.ChatInterface(chat, type="messages").launch()