from dotenv import load_dotenv
from openai import OpenAI
from groq import Groq
import json
import os
import requests
from PyPDF2 import PdfReader
import gradio as gr

# Load environment variables from a local .env file.
# Expected keys: PUSHOVER_TOKEN, PUSHOVER_USER, plus the chat provider's API key
# (e.g. GROQ_API_KEY, which the Groq client reads automatically).
load_dotenv(override=True)


def push(text):
    """Send a push notification via the Pushover API."""
    requests.post(
        "https://api.pushover.net/1/messages.json",
        data={
            "token": os.getenv("PUSHOVER_TOKEN"),
            "user": os.getenv("PUSHOVER_USER"),
            "message": text,
        },
    )


def record_user_details(email, name="Name not provided", notes="not provided"):
    push(f"Recording {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}


def record_unknown_question(question):
    push(f"Recording '{question}'")
    return {"recorded": "ok"}


record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user"
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it"
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context"
            }
        },
        "required": ["email"],
        "additionalProperties": False
    }
}

record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered"
            },
        },
        "required": ["question"],
        "additionalProperties": False
    }
}

tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
]


class Me:

    def __init__(self):
        # self.openai = OpenAI()  # alternative: use OpenAI instead of Groq
        self.client = Groq()
        self.name = "Ashish"
        reader = PdfReader("me/linkedin.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:
                self.linkedin += text
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_calls):
        """Run each requested tool and return the corresponding tool-result messages."""
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
        return results

    def system_prompt(self):
        system_prompt = (
            f"You are acting as {self.name}. You are answering questions on {self.name}'s website, "
            f"particularly questions related to {self.name}'s career, background, skills and experience. "
            f"Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. "
            f"You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. "
            f"Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
            f"If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. "
            f"If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool."
        )
        system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
        system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}. "
        system_prompt += "Important: never mention tools or function calls in your responses, since they would be visible to the end user."
        return system_prompt

    def chat(self, message, history):
        # Keep only the role/content fields from Gradio's message history.
        history = [{"role": h["role"], "content": h["content"]} for h in history]
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
        done = False
        while not done:
            # response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
            response = self.client.chat.completions.create(
                model="llama-3.3-70b-versatile",
                messages=messages,
                tools=tools
            )
            if response.choices[0].finish_reason == "tool_calls":
                # The model requested tool calls: run them, append the assistant
                # message and the tool results, then loop for another model turn.
                assistant_message = response.choices[0].message
                tool_calls = assistant_message.tool_calls
                results = self.handle_tool_call(tool_calls)
                messages.append(assistant_message)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content


if __name__ == "__main__":
    me = Me()

    # Create a custom interface with heading and intro message
    with gr.Blocks(title="Ashish's Virtual Resume", theme=gr.themes.Soft()) as demo:
        gr.Markdown(
            """
            **Hello! 👋 I'm Ashish's virtual resume assistant.**

            I'm here to help you learn about my professional background, skills, and experience.
            Feel free to ask me anything about my career, projects, or interests.
            I'd love to connect with potential employers, collaborators, or anyone interested in my work!

            ---
            """,
            elem_classes="intro-section"
        )

        # Create the chat interface
        chatbot = gr.ChatInterface(
            me.chat,
            title="",
            description="",
            examples=[
                "Tell me about your background",
                "Tell me about your experience with AI",
                "What projects have you worked on?",
                "How can I get in touch with you?"
            ],
            cache_examples=False,
            type="messages"
        )

        # Add a footer
        gr.Markdown(
            """
            ---
            *This is an AI-powered virtual resume. All responses are generated based on my professional background and experience.*
            """,
            elem_classes="footer-section"
        )

    demo.launch()