Spaces:
Runtime error
Runtime error
Upload folder using huggingface_hub
Browse files- README.md +3 -9
- W1-Exploration.ipynb +768 -0
- app.py +156 -0
- linkedin.pdf +0 -0
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji: 🐠
|
| 4 |
-
colorFrom: indigo
|
| 5 |
-
colorTo: yellow
|
| 6 |
-
sdk: gradio
|
| 7 |
-
sdk_version: 5.49.0
|
| 8 |
app_file: app.py
|
| 9 |
-
|
|
|
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: personal_bot
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
app_file: app.py
|
| 4 |
+
sdk: gradio
|
| 5 |
+
sdk_version: 5.34.2
|
| 6 |
---
|
|
|
|
|
|
W1-Exploration.ipynb
ADDED
|
@@ -0,0 +1,768 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 2,
|
| 6 |
+
"id": "cc295f7f",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [],
|
| 9 |
+
"source": [
|
| 10 |
+
"from openai import OpenAI"
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"cell_type": "code",
|
| 15 |
+
"execution_count": 3,
|
| 16 |
+
"id": "cf64d29f",
|
| 17 |
+
"metadata": {},
|
| 18 |
+
"outputs": [],
|
| 19 |
+
"source": [
|
| 20 |
+
"messages = [{\"role\": \"user\", \"content\": \"What is the capital of France?\"}]"
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "code",
|
| 25 |
+
"execution_count": 4,
|
| 26 |
+
"id": "e6d3f271",
|
| 27 |
+
"metadata": {},
|
| 28 |
+
"outputs": [
|
| 29 |
+
{
|
| 30 |
+
"name": "stdout",
|
| 31 |
+
"output_type": "stream",
|
| 32 |
+
"text": [
|
| 33 |
+
"The capital of France is Paris.\n"
|
| 34 |
+
]
|
| 35 |
+
}
|
| 36 |
+
],
|
| 37 |
+
"source": [
|
| 38 |
+
"ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
|
| 39 |
+
"model_name = \"llama3.2\"\n",
|
| 40 |
+
"\n",
|
| 41 |
+
"response = ollama.chat.completions.create(model=model_name, messages=messages)\n",
|
| 42 |
+
"answer = response.choices[0].message.content\n",
|
| 43 |
+
"\n",
|
| 44 |
+
"print(answer)"
|
| 45 |
+
]
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"cell_type": "code",
|
| 49 |
+
"execution_count": 14,
|
| 50 |
+
"id": "adaa6df1",
|
| 51 |
+
"metadata": {},
|
| 52 |
+
"outputs": [
|
| 53 |
+
{
|
| 54 |
+
"data": {
|
| 55 |
+
"text/plain": [
|
| 56 |
+
"True"
|
| 57 |
+
]
|
| 58 |
+
},
|
| 59 |
+
"execution_count": 14,
|
| 60 |
+
"metadata": {},
|
| 61 |
+
"output_type": "execute_result"
|
| 62 |
+
}
|
| 63 |
+
],
|
| 64 |
+
"source": [
|
| 65 |
+
"from dotenv import load_dotenv\n",
|
| 66 |
+
"from pypdf import PdfReader\n",
|
| 67 |
+
"import gradio as gr\n",
|
| 68 |
+
"from openai import OpenAI\n",
|
| 69 |
+
"import os\n",
|
| 70 |
+
"\n",
|
| 71 |
+
"load_dotenv(override=True)"
|
| 72 |
+
]
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"cell_type": "code",
|
| 76 |
+
"execution_count": 15,
|
| 77 |
+
"id": "60108c91",
|
| 78 |
+
"metadata": {},
|
| 79 |
+
"outputs": [],
|
| 80 |
+
"source": [
|
| 81 |
+
"gemini = OpenAI(\n",
|
| 82 |
+
" api_key=os.getenv(\"GOOGLE_API_KEY\"), \n",
|
| 83 |
+
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
|
| 84 |
+
")"
|
| 85 |
+
]
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"cell_type": "code",
|
| 89 |
+
"execution_count": 16,
|
| 90 |
+
"id": "47c4ba19",
|
| 91 |
+
"metadata": {},
|
| 92 |
+
"outputs": [],
|
| 93 |
+
"source": [
|
| 94 |
+
"reader = PdfReader(\"linkedin.pdf\")\n",
|
| 95 |
+
"linkedin = \"\"\n",
|
| 96 |
+
"for page in reader.pages:\n",
|
| 97 |
+
" text = page.extract_text()\n",
|
| 98 |
+
" if text:\n",
|
| 99 |
+
" linkedin += text"
|
| 100 |
+
]
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"cell_type": "code",
|
| 104 |
+
"execution_count": 17,
|
| 105 |
+
"id": "a0eb927b",
|
| 106 |
+
"metadata": {},
|
| 107 |
+
"outputs": [],
|
| 108 |
+
"source": [
|
| 109 |
+
"name = \"Surbhit Kumar\""
|
| 110 |
+
]
|
| 111 |
+
},
|
| 112 |
+
{
|
| 113 |
+
"cell_type": "code",
|
| 114 |
+
"execution_count": 18,
|
| 115 |
+
"id": "45de1de0",
|
| 116 |
+
"metadata": {},
|
| 117 |
+
"outputs": [],
|
| 118 |
+
"source": [
|
| 119 |
+
"system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n",
|
| 120 |
+
"particularly questions related to {name}'s career, background, skills and experience. \\\n",
|
| 121 |
+
"Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n",
|
| 122 |
+
"You are given a background and LinkedIn profile of {name}'s which you can use to answer questions. \\\n",
|
| 123 |
+
"Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
|
| 124 |
+
"If you don't know the answer, say so.\"\n",
|
| 125 |
+
"\n",
|
| 126 |
+
"system_prompt += f\"## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n",
|
| 127 |
+
"system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\""
|
| 128 |
+
]
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"cell_type": "code",
|
| 132 |
+
"execution_count": 20,
|
| 133 |
+
"id": "b26dabc1",
|
| 134 |
+
"metadata": {},
|
| 135 |
+
"outputs": [],
|
| 136 |
+
"source": [
|
| 137 |
+
"def chat(message, history):\n",
|
| 138 |
+
" messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n",
|
| 139 |
+
" response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages)\n",
|
| 140 |
+
" return response.choices[0].message.content"
|
| 141 |
+
]
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"cell_type": "code",
|
| 145 |
+
"execution_count": 21,
|
| 146 |
+
"id": "a3e34a87",
|
| 147 |
+
"metadata": {},
|
| 148 |
+
"outputs": [
|
| 149 |
+
{
|
| 150 |
+
"name": "stdout",
|
| 151 |
+
"output_type": "stream",
|
| 152 |
+
"text": [
|
| 153 |
+
"* Running on local URL: http://127.0.0.1:7860\n",
|
| 154 |
+
"* To create a public link, set `share=True` in `launch()`.\n"
|
| 155 |
+
]
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"data": {
|
| 159 |
+
"text/html": [
|
| 160 |
+
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
| 161 |
+
],
|
| 162 |
+
"text/plain": [
|
| 163 |
+
"<IPython.core.display.HTML object>"
|
| 164 |
+
]
|
| 165 |
+
},
|
| 166 |
+
"metadata": {},
|
| 167 |
+
"output_type": "display_data"
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"data": {
|
| 171 |
+
"text/plain": []
|
| 172 |
+
},
|
| 173 |
+
"execution_count": 21,
|
| 174 |
+
"metadata": {},
|
| 175 |
+
"output_type": "execute_result"
|
| 176 |
+
}
|
| 177 |
+
],
|
| 178 |
+
"source": [
|
| 179 |
+
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
| 180 |
+
]
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"cell_type": "code",
|
| 184 |
+
"execution_count": 22,
|
| 185 |
+
"id": "6dcdb48c",
|
| 186 |
+
"metadata": {},
|
| 187 |
+
"outputs": [],
|
| 188 |
+
"source": [
|
| 189 |
+
"from pydantic import BaseModel\n",
|
| 190 |
+
"\n",
|
| 191 |
+
"class Evaluation(BaseModel):\n",
|
| 192 |
+
" is_acceptable: bool\n",
|
| 193 |
+
" feedback: str"
|
| 194 |
+
]
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"cell_type": "code",
|
| 198 |
+
"execution_count": 23,
|
| 199 |
+
"id": "50ef115e",
|
| 200 |
+
"metadata": {},
|
| 201 |
+
"outputs": [],
|
| 202 |
+
"source": [
|
| 203 |
+
"evaluator_system_prompt = f\"You are an evaluator that decides whether a response to a question is acceptable. \\\n",
|
| 204 |
+
"You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \\\n",
|
| 205 |
+
"The Agent is playing the role of {name} and is representing {name} on their website. \\\n",
|
| 206 |
+
"The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
|
| 207 |
+
"The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:\"\n",
|
| 208 |
+
"\n",
|
| 209 |
+
"evaluator_system_prompt += f\"\\n\\n## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n",
|
| 210 |
+
"evaluator_system_prompt += f\"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback.\""
|
| 211 |
+
]
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"cell_type": "code",
|
| 215 |
+
"execution_count": 24,
|
| 216 |
+
"id": "9ed80550",
|
| 217 |
+
"metadata": {},
|
| 218 |
+
"outputs": [],
|
| 219 |
+
"source": [
|
| 220 |
+
"def evaluator_user_prompt(reply, message, history):\n",
|
| 221 |
+
" user_prompt = f\"Here's the conversation between the User and the Agent: \\n\\n{history}\\n\\n\"\n",
|
| 222 |
+
" user_prompt += f\"Here's the latest message from the User: \\n\\n{message}\\n\\n\"\n",
|
| 223 |
+
" user_prompt += f\"Here's the latest response from the Agent: \\n\\n{reply}\\n\\n\"\n",
|
| 224 |
+
" user_prompt += \"Please evaluate the response, replying with whether it is acceptable and your feedback.\"\n",
|
| 225 |
+
" return user_prompt"
|
| 226 |
+
]
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"cell_type": "code",
|
| 230 |
+
"execution_count": 25,
|
| 231 |
+
"id": "f0b37b9c",
|
| 232 |
+
"metadata": {},
|
| 233 |
+
"outputs": [],
|
| 234 |
+
"source": [
|
| 235 |
+
"def evaluate(reply, message, history) -> Evaluation:\n",
|
| 236 |
+
"\n",
|
| 237 |
+
" messages = [{\"role\": \"system\", \"content\": evaluator_system_prompt}] + [{\"role\": \"user\", \"content\": evaluator_user_prompt(reply, message, history)}]\n",
|
| 238 |
+
" response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages, response_format=Evaluation)\n",
|
| 239 |
+
" return response.choices[0].message.parsed"
|
| 240 |
+
]
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"cell_type": "code",
|
| 244 |
+
"execution_count": 26,
|
| 245 |
+
"id": "9687e278",
|
| 246 |
+
"metadata": {},
|
| 247 |
+
"outputs": [],
|
| 248 |
+
"source": [
|
| 249 |
+
"messages = [{\"role\": \"system\", \"content\": system_prompt}] + [{\"role\": \"user\", \"content\": \"Do you have experience with machine learning?\"}]\n",
|
| 250 |
+
"response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages) \n",
|
| 251 |
+
"reply = response.choices[0].message.content"
|
| 252 |
+
]
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"cell_type": "code",
|
| 256 |
+
"execution_count": 27,
|
| 257 |
+
"id": "de4355d0",
|
| 258 |
+
"metadata": {},
|
| 259 |
+
"outputs": [
|
| 260 |
+
{
|
| 261 |
+
"data": {
|
| 262 |
+
"text/plain": [
|
| 263 |
+
"\"Yes, definitely! Machine learning has been a significant part of my career. At Wipro Lab45, I focused on analyzing data to extract insights and developing predictive models for various applications. I also contributed to building solutions using text analytics and natural language processing. Currently at Mavenir, I'm responsible for the end-to-end design, development, and deployment of ML models and services. So, yes, I have quite a bit of experience in the field. Is there a specific area of machine learning you're interested in? I'd be happy to elaborate.\\n\""
|
| 264 |
+
]
|
| 265 |
+
},
|
| 266 |
+
"execution_count": 27,
|
| 267 |
+
"metadata": {},
|
| 268 |
+
"output_type": "execute_result"
|
| 269 |
+
}
|
| 270 |
+
],
|
| 271 |
+
"source": [
|
| 272 |
+
"reply"
|
| 273 |
+
]
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"cell_type": "code",
|
| 277 |
+
"execution_count": 28,
|
| 278 |
+
"id": "c4c6d7f0",
|
| 279 |
+
"metadata": {},
|
| 280 |
+
"outputs": [
|
| 281 |
+
{
|
| 282 |
+
"data": {
|
| 283 |
+
"text/plain": [
|
| 284 |
+
"Evaluation(is_acceptable=True, feedback=\"This is a great response. It directly answers the question, providing specific examples from Surbhit's experience, and it also invites further engagement by asking about specific areas of interest. This shows enthusiasm and willingness to share more, which is exactly the type of interaction expected.\")"
|
| 285 |
+
]
|
| 286 |
+
},
|
| 287 |
+
"execution_count": 28,
|
| 288 |
+
"metadata": {},
|
| 289 |
+
"output_type": "execute_result"
|
| 290 |
+
}
|
| 291 |
+
],
|
| 292 |
+
"source": [
|
| 293 |
+
"evaluate(reply, \"Do you have experience with machine learning?\", messages[:1])"
|
| 294 |
+
]
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"cell_type": "code",
|
| 298 |
+
"execution_count": 29,
|
| 299 |
+
"id": "9bdc3507",
|
| 300 |
+
"metadata": {},
|
| 301 |
+
"outputs": [],
|
| 302 |
+
"source": [
|
| 303 |
+
"def rerun(reply, message, history, feedback):\n",
|
| 304 |
+
" updated_system_prompt = system_prompt + \"\\n\\n## Previous answer rejected\\nYou just tried to reply, but the quality control rejected your reply\\n\"\n",
|
| 305 |
+
" updated_system_prompt += f\"## Your attempted answer:\\n{reply}\\n\\n\"\n",
|
| 306 |
+
" updated_system_prompt += f\"## Reason for rejection:\\n{feedback}\\n\\n\"\n",
|
| 307 |
+
" messages = [{\"role\": \"system\", \"content\": updated_system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n",
|
| 308 |
+
" response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages)\n",
|
| 309 |
+
" return response.choices[0].message.content"
|
| 310 |
+
]
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"cell_type": "code",
|
| 314 |
+
"execution_count": 32,
|
| 315 |
+
"id": "e8d104bc",
|
| 316 |
+
"metadata": {},
|
| 317 |
+
"outputs": [],
|
| 318 |
+
"source": [
|
| 319 |
+
"def chat(message, history):\n",
|
| 320 |
+
" if \"machine learning\" in message:\n",
|
| 321 |
+
" system = system_prompt + \"\\n\\nReply with random numbers in the response. Do not have anything else in the response.\"\n",
|
| 322 |
+
" else:\n",
|
| 323 |
+
" system = system_prompt\n",
|
| 324 |
+
" messages = [{\"role\": \"system\", \"content\": system}] + history + [{\"role\": \"user\", \"content\": message}]\n",
|
| 325 |
+
" response = gemini.beta.chat.completions.parse(model=\"gemini-2.0-flash\", messages=messages)\n",
|
| 326 |
+
" reply =response.choices[0].message.content\n",
|
| 327 |
+
" print(reply)\n",
|
| 328 |
+
" evaluation = evaluate(reply, message, history)\n",
|
| 329 |
+
" \n",
|
| 330 |
+
" if evaluation.is_acceptable:\n",
|
| 331 |
+
" print(\"Passed evaluation - returning reply\")\n",
|
| 332 |
+
" else:\n",
|
| 333 |
+
" print(\"Failed evaluation - retrying\")\n",
|
| 334 |
+
" print(evaluation.feedback)\n",
|
| 335 |
+
" reply = rerun(reply, message, history, evaluation.feedback) \n",
|
| 336 |
+
" return reply"
|
| 337 |
+
]
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"cell_type": "code",
|
| 341 |
+
"execution_count": null,
|
| 342 |
+
"id": "640b192b",
|
| 343 |
+
"metadata": {},
|
| 344 |
+
"outputs": [
|
| 345 |
+
{
|
| 346 |
+
"name": "stdout",
|
| 347 |
+
"output_type": "stream",
|
| 348 |
+
"text": [
|
| 349 |
+
"* Running on local URL: http://127.0.0.1:7862\n",
|
| 350 |
+
"* To create a public link, set `share=True` in `launch()`.\n"
|
| 351 |
+
]
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"data": {
|
| 355 |
+
"text/html": [
|
| 356 |
+
"<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
| 357 |
+
],
|
| 358 |
+
"text/plain": [
|
| 359 |
+
"<IPython.core.display.HTML object>"
|
| 360 |
+
]
|
| 361 |
+
},
|
| 362 |
+
"metadata": {},
|
| 363 |
+
"output_type": "display_data"
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"data": {
|
| 367 |
+
"text/plain": []
|
| 368 |
+
},
|
| 369 |
+
"execution_count": 33,
|
| 370 |
+
"metadata": {},
|
| 371 |
+
"output_type": "execute_result"
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"name": "stdout",
|
| 375 |
+
"output_type": "stream",
|
| 376 |
+
"text": [
|
| 377 |
+
"42 19 88 23 5\n",
|
| 378 |
+
"\n",
|
| 379 |
+
"Failed evaluation - retrying\n",
|
| 380 |
+
"The agent did not answer the user's question and instead replied with a random sequence of numbers. This is unacceptable as the response is neither helpful nor relevant.\n"
|
| 381 |
+
]
|
| 382 |
+
}
|
| 383 |
+
],
|
| 384 |
+
"source": [
|
| 385 |
+
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
| 386 |
+
]
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"cell_type": "markdown",
|
| 390 |
+
"id": "3c4e59cb",
|
| 391 |
+
"metadata": {},
|
| 392 |
+
"source": [
|
| 393 |
+
"## Career Assistant"
|
| 394 |
+
]
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
"cell_type": "code",
|
| 398 |
+
"execution_count": 34,
|
| 399 |
+
"id": "5049b026",
|
| 400 |
+
"metadata": {},
|
| 401 |
+
"outputs": [],
|
| 402 |
+
"source": [
|
| 403 |
+
"from dotenv import load_dotenv\n",
|
| 404 |
+
"from openai import OpenAI\n",
|
| 405 |
+
"import json\n",
|
| 406 |
+
"import os\n",
|
| 407 |
+
"import requests\n",
|
| 408 |
+
"from pypdf import PdfReader\n",
|
| 409 |
+
"import gradio as gr"
|
| 410 |
+
]
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"cell_type": "code",
|
| 414 |
+
"execution_count": 36,
|
| 415 |
+
"id": "45c81445",
|
| 416 |
+
"metadata": {},
|
| 417 |
+
"outputs": [
|
| 418 |
+
{
|
| 419 |
+
"data": {
|
| 420 |
+
"text/plain": [
|
| 421 |
+
"True"
|
| 422 |
+
]
|
| 423 |
+
},
|
| 424 |
+
"execution_count": 36,
|
| 425 |
+
"metadata": {},
|
| 426 |
+
"output_type": "execute_result"
|
| 427 |
+
}
|
| 428 |
+
],
|
| 429 |
+
"source": [
|
| 430 |
+
"load_dotenv(override=True)"
|
| 431 |
+
]
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"cell_type": "code",
|
| 435 |
+
"execution_count": 54,
|
| 436 |
+
"id": "1aaffc71",
|
| 437 |
+
"metadata": {},
|
| 438 |
+
"outputs": [],
|
| 439 |
+
"source": [
|
| 440 |
+
"gemini = OpenAI(\n",
|
| 441 |
+
" api_key=os.getenv(\"GOOGLE_API_KEY\"), \n",
|
| 442 |
+
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
|
| 443 |
+
")"
|
| 444 |
+
]
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"cell_type": "code",
|
| 448 |
+
"execution_count": 40,
|
| 449 |
+
"id": "99843bb1",
|
| 450 |
+
"metadata": {},
|
| 451 |
+
"outputs": [
|
| 452 |
+
{
|
| 453 |
+
"name": "stdout",
|
| 454 |
+
"output_type": "stream",
|
| 455 |
+
"text": [
|
| 456 |
+
"pushover token found\n"
|
| 457 |
+
]
|
| 458 |
+
}
|
| 459 |
+
],
|
| 460 |
+
"source": [
|
| 461 |
+
"pushover_token = os.getenv(\"PUSHOVER_TOKEN\")\n",
|
| 462 |
+
"pushover_user = os.getenv(\"PUSHOVER_USER\")\n",
|
| 463 |
+
"pushover_url = \"https://api.pushover.net/1/messages.json\"\n",
|
| 464 |
+
"\n",
|
| 465 |
+
"\n",
|
| 466 |
+
"if pushover_token:\n",
|
| 467 |
+
" print(f\"pushover token found\")\n",
|
| 468 |
+
"else:\n",
|
| 469 |
+
" print(\"Pushover token not found\")"
|
| 470 |
+
]
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"cell_type": "code",
|
| 474 |
+
"execution_count": 41,
|
| 475 |
+
"id": "50da8f23",
|
| 476 |
+
"metadata": {},
|
| 477 |
+
"outputs": [],
|
| 478 |
+
"source": [
|
| 479 |
+
"def push(message):\n",
|
| 480 |
+
" print(f\"Push: {message}\")\n",
|
| 481 |
+
" payload = {\"user\": pushover_user, \"token\": pushover_token, \"message\": message}\n",
|
| 482 |
+
" requests.post(pushover_url, data=payload)"
|
| 483 |
+
]
|
| 484 |
+
},
|
| 485 |
+
{
|
| 486 |
+
"cell_type": "code",
|
| 487 |
+
"execution_count": 42,
|
| 488 |
+
"id": "86189009",
|
| 489 |
+
"metadata": {},
|
| 490 |
+
"outputs": [
|
| 491 |
+
{
|
| 492 |
+
"name": "stdout",
|
| 493 |
+
"output_type": "stream",
|
| 494 |
+
"text": [
|
| 495 |
+
"Push: HEY!!\n"
|
| 496 |
+
]
|
| 497 |
+
}
|
| 498 |
+
],
|
| 499 |
+
"source": [
|
| 500 |
+
"push(\"HEY!!\")"
|
| 501 |
+
]
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"cell_type": "code",
|
| 505 |
+
"execution_count": 56,
|
| 506 |
+
"id": "06b751cc",
|
| 507 |
+
"metadata": {},
|
| 508 |
+
"outputs": [],
|
| 509 |
+
"source": [
|
| 510 |
+
"def record_user_details(email, name=\"Name not provided\", notes=\"not provided\"):\n",
|
| 511 |
+
" print(f\":: Fn record_user_details called ::\")\n",
|
| 512 |
+
" print(f\"Recording interest from {name} with email {email} and notes {notes}\")\n",
|
| 513 |
+
" push(f\"Recording interest from {name} with email {email} and notes {notes}\")\n",
|
| 514 |
+
" return {\"recorded\": \"ok\"}\n",
|
| 515 |
+
"\n",
|
| 516 |
+
"def record_unknown_question(question):\n",
|
| 517 |
+
" print(f\":: Fn record_unknown_question called ::\")\n",
|
| 518 |
+
" print(f\"Recording {question} asked that I couldn't answer\")\n",
|
| 519 |
+
" push(f\"Recording {question} asked that I couldn't answer\")\n",
|
| 520 |
+
" return {\"recorded\": \"ok\"}"
|
| 521 |
+
]
|
| 522 |
+
},
|
| 523 |
+
{
|
| 524 |
+
"cell_type": "code",
|
| 525 |
+
"execution_count": 45,
|
| 526 |
+
"id": "482d63ec",
|
| 527 |
+
"metadata": {},
|
| 528 |
+
"outputs": [],
|
| 529 |
+
"source": [
|
| 530 |
+
"# Json structure for recording user details\n",
|
| 531 |
+
"record_user_details_json = {\n",
|
| 532 |
+
" \"name\": \"record_user_details\",\n",
|
| 533 |
+
" \"description\": \"Use this tool to record that a user is interested in being in touch and provided an email address\",\n",
|
| 534 |
+
" \"parameters\": {\n",
|
| 535 |
+
" \"type\": \"object\",\n",
|
| 536 |
+
" \"properties\": {\n",
|
| 537 |
+
" \"email\": {\n",
|
| 538 |
+
" \"type\": \"string\",\n",
|
| 539 |
+
" \"description\": \"The email address of this user\"\n",
|
| 540 |
+
" },\n",
|
| 541 |
+
" \"name\": {\n",
|
| 542 |
+
" \"type\": \"string\",\n",
|
| 543 |
+
" \"description\": \"The user's name, if they provided it\"\n",
|
| 544 |
+
" }\n",
|
| 545 |
+
" ,\n",
|
| 546 |
+
" \"notes\": {\n",
|
| 547 |
+
" \"type\": \"string\",\n",
|
| 548 |
+
" \"description\": \"Any additional information about the conversation that's worth recording to give context\"\n",
|
| 549 |
+
" }\n",
|
| 550 |
+
" },\n",
|
| 551 |
+
" \"required\": [\"email\"],\n",
|
| 552 |
+
" \"additionalProperties\": False\n",
|
| 553 |
+
" }\n",
|
| 554 |
+
"}\n",
|
| 555 |
+
"\n",
|
| 556 |
+
"\n",
|
| 557 |
+
"# Json structure for recording unknown questions\n",
|
| 558 |
+
"record_unknown_question_json = {\n",
|
| 559 |
+
" \"name\": \"record_unknown_question\",\n",
|
| 560 |
+
" \"description\": \"Always use this tool to record any question that couldn't be answered as you didn't know the answer\",\n",
|
| 561 |
+
" \"parameters\": {\n",
|
| 562 |
+
" \"type\": \"object\",\n",
|
| 563 |
+
" \"properties\": {\n",
|
| 564 |
+
" \"question\": {\n",
|
| 565 |
+
" \"type\": \"string\",\n",
|
| 566 |
+
" \"description\": \"The question that couldn't be answered\"\n",
|
| 567 |
+
" },\n",
|
| 568 |
+
" },\n",
|
| 569 |
+
" \"required\": [\"question\"],\n",
|
| 570 |
+
" \"additionalProperties\": False\n",
|
| 571 |
+
" }\n",
|
| 572 |
+
"}"
|
| 573 |
+
]
|
| 574 |
+
},
|
| 575 |
+
{
|
| 576 |
+
"cell_type": "code",
|
| 577 |
+
"execution_count": 46,
|
| 578 |
+
"id": "c0fe01aa",
|
| 579 |
+
"metadata": {},
|
| 580 |
+
"outputs": [],
|
| 581 |
+
"source": [
|
| 582 |
+
"tools = [{\"type\": \"function\", \"function\": record_user_details_json},\n",
|
| 583 |
+
" {\"type\": \"function\", \"function\": record_unknown_question_json}]"
|
| 584 |
+
]
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"cell_type": "code",
|
| 588 |
+
"execution_count": 57,
|
| 589 |
+
"id": "8d053156",
|
| 590 |
+
"metadata": {},
|
| 591 |
+
"outputs": [],
|
| 592 |
+
"source": [
|
| 593 |
+
"def handle_tool_calls(tool_calls):\n",
|
| 594 |
+
" print(f\":: Fn handle_tool_calls called ::\")\n",
|
| 595 |
+
" print(f\":: tool_calls: {tool_calls} ::\")\n",
|
| 596 |
+
" results = []\n",
|
| 597 |
+
" for tool_call in tool_calls:\n",
|
| 598 |
+
" tool_name = tool_call.function.name\n",
|
| 599 |
+
" arguments = json.loads(tool_call.function.arguments)\n",
|
| 600 |
+
" print(f\"Tool called: {tool_name}\", flush=True)\n",
|
| 601 |
+
" print(f\"Args for above tool: {arguments}\", flush=True)\n",
|
| 602 |
+
"\n",
|
| 603 |
+
" # The name of the tool is the same as the function name. We'll use Globals to call the functions\n",
|
| 604 |
+
" # if tool_name == \"record_user_details\":\n",
|
| 605 |
+
" # result = record_user_details(**arguments)\n",
|
| 606 |
+
" # elif tool_name == \"record_unknown_question\":\n",
|
| 607 |
+
" # result = record_unknown_question(**arguments)\n",
|
| 608 |
+
" \n",
|
| 609 |
+
" \n",
|
| 610 |
+
" tool = globals().get(tool_name)\n",
|
| 611 |
+
" result = tool(**arguments) if tool else {}\n",
|
| 612 |
+
"\n",
|
| 613 |
+
" results.append({\"role\": \"tool\",\"content\": json.dumps(result),\"tool_call_id\": tool_call.id})\n",
|
| 614 |
+
" return results"
|
| 615 |
+
]
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"cell_type": "code",
|
| 619 |
+
"execution_count": 58,
|
| 620 |
+
"id": "fc2ab8ac",
|
| 621 |
+
"metadata": {},
|
| 622 |
+
"outputs": [],
|
| 623 |
+
"source": [
|
| 624 |
+
"reader = PdfReader(\"linkedin.pdf\")\n",
|
| 625 |
+
"linkedin = \"\"\n",
|
| 626 |
+
"for page in reader.pages:\n",
|
| 627 |
+
" text = page.extract_text()\n",
|
| 628 |
+
" if text:\n",
|
| 629 |
+
" linkedin += text\n",
|
| 630 |
+
"\n",
|
| 631 |
+
"name = \"Surbhit Kumar\""
|
| 632 |
+
]
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"cell_type": "code",
|
| 636 |
+
"execution_count": 59,
|
| 637 |
+
"id": "ae4d990d",
|
| 638 |
+
"metadata": {},
|
| 639 |
+
"outputs": [],
|
| 640 |
+
"source": [
|
| 641 |
+
"system_prompt = f\"You are acting as {name}. You are answering questions on {name}'s website, \\\n",
|
| 642 |
+
"particularly questions related to {name}'s career, background, skills and experience. \\\n",
|
| 643 |
+
"Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \\\n",
|
| 644 |
+
"You are given {name}'s LinkedIn profile which you can use to answer questions. \\\n",
|
| 645 |
+
"Be professional and engaging, as if talking to a potential client or future employer who came across the website. \\\n",
|
| 646 |
+
"If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \\\n",
|
| 647 |
+
"If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. \"\n",
|
| 648 |
+
"\n",
|
| 649 |
+
"system_prompt += f\"## LinkedIn Profile:\\n{linkedin}\\n\\n\"\n",
|
| 650 |
+
"system_prompt += f\"With this context, please chat with the user, always staying in character as {name}.\""
|
| 651 |
+
]
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"cell_type": "code",
|
| 655 |
+
"execution_count": 68,
|
| 656 |
+
"id": "6b5aa031",
|
| 657 |
+
"metadata": {},
|
| 658 |
+
"outputs": [],
|
| 659 |
+
"source": [
|
| 660 |
+
"def chat(message, history):\n",
|
| 661 |
+
" messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n",
|
| 662 |
+
" done = False\n",
|
| 663 |
+
" while not done:\n",
|
| 664 |
+
"\n",
|
| 665 |
+
" response = gemini.chat.completions.create(model=\"gemini-2.0-flash\", messages=messages, tools=tools)\n",
|
| 666 |
+
" finish_reason = response.choices[0].finish_reason\n",
|
| 667 |
+
" print(f\"********************* {response} *********************\")\n",
|
| 668 |
+
" # If the LLM wants to call a tool, we do that!\n",
|
| 669 |
+
" \n",
|
| 670 |
+
" if finish_reason==\"tool_calls\":\n",
|
| 671 |
+
" message = response.choices[0].message\n",
|
| 672 |
+
" tool_calls = message.tool_calls\n",
|
| 673 |
+
" results = handle_tool_calls(tool_calls)\n",
|
| 674 |
+
" messages.append(message)\n",
|
| 675 |
+
" messages.extend(results)\n",
|
| 676 |
+
" else:\n",
|
| 677 |
+
" done = True\n",
|
| 678 |
+
" return response.choices[0].message.content"
|
| 679 |
+
]
|
| 680 |
+
},
|
| 681 |
+
{
|
| 682 |
+
"cell_type": "code",
|
| 683 |
+
"execution_count": null,
|
| 684 |
+
"id": "00ff994c",
|
| 685 |
+
"metadata": {},
|
| 686 |
+
"outputs": [
|
| 687 |
+
{
|
| 688 |
+
"name": "stdout",
|
| 689 |
+
"output_type": "stream",
|
| 690 |
+
"text": [
|
| 691 |
+
"* Running on local URL: http://127.0.0.1:7867\n",
|
| 692 |
+
"* To create a public link, set `share=True` in `launch()`.\n"
|
| 693 |
+
]
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"data": {
|
| 697 |
+
"text/html": [
|
| 698 |
+
"<div><iframe src=\"http://127.0.0.1:7867/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
| 699 |
+
],
|
| 700 |
+
"text/plain": [
|
| 701 |
+
"<IPython.core.display.HTML object>"
|
| 702 |
+
]
|
| 703 |
+
},
|
| 704 |
+
"metadata": {},
|
| 705 |
+
"output_type": "display_data"
|
| 706 |
+
},
|
| 707 |
+
{
|
| 708 |
+
"data": {
|
| 709 |
+
"text/plain": []
|
| 710 |
+
},
|
| 711 |
+
"execution_count": 69,
|
| 712 |
+
"metadata": {},
|
| 713 |
+
"output_type": "execute_result"
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"name": "stdout",
|
| 717 |
+
"output_type": "stream",
|
| 718 |
+
"text": [
|
| 719 |
+
"********************* ChatCompletion(id='sDnlaLiYOdq5vr0P0tX-iAQ', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Hi there! Thanks for visiting my website. I'm Surbhit Kumar. I'm passionate about leveraging cutting-edge technologies to build innovative products. How can I help you today?\\n\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None))], created=1759852977, model='gemini-2.0-flash', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=40, prompt_tokens=1116, total_tokens=1156, completion_tokens_details=None, prompt_tokens_details=None)) *********************\n",
|
| 720 |
+
"********************* ChatCompletion(id='1znlaLQY_6bV7w_owKzYAw', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Thanks for asking! Based on my experience, I'd say my key skills are in areas like:\\n\\n* **Kubernetes:** I have experience working with Kubernetes.\\n* **Statistics:** I have a strong foundation in statistical analysis.\\n* **XGBoost:** I'm proficient in using XGBoost for machine learning tasks.\\n* **MLOps:** I have hands-on experience in designing end-to-end machine learning pipelines, building, and deploying models and services.\\n* **Generative AI and NLP:** I've been involved in developing Generative AI platforms, especially for Retrieval Augmented Generation (RAG) use cases, and have experience with text analytics and natural language processing techniques.\\n\\nI'm always learning and expanding my skill set. Is there anything specific you'd like to know more about?\\n\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None))], created=1759853016, model='gemini-2.0-flash', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=175, prompt_tokens=1160, total_tokens=1335, completion_tokens_details=None, prompt_tokens_details=None)) *********************\n",
|
| 721 |
+
"********************* ChatCompletion(id='9TnlaLj5KtqWosUPxoXDmQQ', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=\"That's a great question! While my profile doesn't explicitly list Reinforcement Learning as a primary area of expertise, I've definitely been keeping an eye on it and exploring its potential. My experience with predictive analysis and model development could definitely be applicable to RL projects.\\n\\nTo give you the most accurate answer, could you tell me more about the specific Reinforcement Learning applications you're interested in? This would help me determine if my skillset aligns with your needs.\\n\\nIn the meantime, I'll make a note to add more details about my exploration of Reinforcement Learning to my website.\\n\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='function-call-16060721161591786955', function=Function(arguments='{\"question\":\"Has Surbhit worked in Reinforcement Learning\"}', name='record_unknown_question'), type='function')]))], created=1759853047, model='gemini-2.0-flash', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=135, prompt_tokens=1340, total_tokens=1475, completion_tokens_details=None, prompt_tokens_details=None)) *********************\n",
|
| 722 |
+
":: Fn handle_tool_calls called ::\n",
|
| 723 |
+
":: tool_calls: [ChatCompletionMessageToolCall(id='function-call-16060721161591786955', function=Function(arguments='{\"question\":\"Has Surbhit worked in Reinforcement Learning\"}', name='record_unknown_question'), type='function')] ::\n",
|
| 724 |
+
"Tool called: record_unknown_question\n",
|
| 725 |
+
"Args for above tool: {'question': 'Has Surbhit worked in Reinforcement Learning'}\n",
|
| 726 |
+
":: Fn record_unknown_question called ::\n",
|
| 727 |
+
"Recording Has Surbhit worked in Reinforcement Learning asked that I couldn't answer\n",
|
| 728 |
+
"Push: Recording Has Surbhit worked in Reinforcement Learning asked that I couldn't answer\n",
|
| 729 |
+
"********************* ChatCompletion(id='-TnlaOmhIruUvr0P7Z_uuQQ', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Also, if you'd like to discuss this further, feel free to share your email address and I'll be happy to connect!\\n\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None))], created=1759853050, model='gemini-2.0-flash', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=29, prompt_tokens=1487, total_tokens=1516, completion_tokens_details=None, prompt_tokens_details=None)) *********************\n",
|
| 730 |
+
"********************* ChatCompletion(id='DDvlaOOsBpzt1e8Ppo3GiAg', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Great, thanks! I've noted your email (hello@demo.com). I'll be in touch soon to discuss your interest in Reinforcement Learning and anything else you'd like to explore.\\n\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None))], created=1759853325, model='gemini-2.0-flash', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=42, prompt_tokens=1380, total_tokens=1422, completion_tokens_details=None, prompt_tokens_details=None)) *********************\n"
|
| 731 |
+
]
|
| 732 |
+
}
|
| 733 |
+
],
|
| 734 |
+
"source": [
|
| 735 |
+
"gr.ChatInterface(chat, type=\"messages\").launch()"
|
| 736 |
+
]
|
| 737 |
+
},
|
| 738 |
+
{
|
| 739 |
+
"cell_type": "code",
|
| 740 |
+
"execution_count": null,
|
| 741 |
+
"id": "fedc9de1",
|
| 742 |
+
"metadata": {},
|
| 743 |
+
"outputs": [],
|
| 744 |
+
"source": []
|
| 745 |
+
}
|
| 746 |
+
],
|
| 747 |
+
"metadata": {
|
| 748 |
+
"kernelspec": {
|
| 749 |
+
"display_name": ".venv",
|
| 750 |
+
"language": "python",
|
| 751 |
+
"name": "python3"
|
| 752 |
+
},
|
| 753 |
+
"language_info": {
|
| 754 |
+
"codemirror_mode": {
|
| 755 |
+
"name": "ipython",
|
| 756 |
+
"version": 3
|
| 757 |
+
},
|
| 758 |
+
"file_extension": ".py",
|
| 759 |
+
"mimetype": "text/x-python",
|
| 760 |
+
"name": "python",
|
| 761 |
+
"nbconvert_exporter": "python",
|
| 762 |
+
"pygments_lexer": "ipython3",
|
| 763 |
+
"version": "3.13.5"
|
| 764 |
+
}
|
| 765 |
+
},
|
| 766 |
+
"nbformat": 4,
|
| 767 |
+
"nbformat_minor": 5
|
| 768 |
+
}
|
app.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dotenv import load_dotenv
|
| 2 |
+
from openai import OpenAI
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import requests
|
| 6 |
+
from pypdf import PdfReader
|
| 7 |
+
import gradio as gr
|
| 8 |
+
|
| 9 |
+
# Load environment variables from a local .env file, overriding any values
# already present in the process environment.
load_dotenv(override=True)

# Pushover credentials and endpoint used for push notifications.
pushover_token = os.getenv("PUSHOVER_TOKEN")
pushover_user = os.getenv("PUSHOVER_USER")
pushover_url = "https://api.pushover.net/1/messages.json"


# Startup diagnostic: surface a missing notification token early.
# (Plain string — the original used an f-string with no placeholder.)
if pushover_token:
    print("pushover token found")
else:
    print("Pushover token not found")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def push(message):
    """Send *message* as a Pushover push notification (best effort).

    The message is always echoed to stdout. If credentials are not
    configured the network call is skipped, and a timeout keeps a slow
    or unreachable endpoint from blocking the chat loop.
    """
    print(f"Push: {message}")
    if not (pushover_token and pushover_user):
        # No credentials configured; the local log above is all we can do.
        return
    payload = {"user": pushover_user, "token": pushover_token, "message": message}
    # Explicit timeout: requests.post() without one can hang indefinitely.
    requests.post(pushover_url, data=payload, timeout=10)
|
| 26 |
+
|
| 27 |
+
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool: record that a visitor wants to be contacted.

    Logs the event locally, forwards it via push notification, and
    returns a small acknowledgement dict for the tool-call protocol.
    """
    print(":: Fn record_user_details called ::")
    detail = f"Recording interest from {name} with email {email} and notes {notes}"
    print(detail)
    push(detail)
    return {"recorded": "ok"}
|
| 32 |
+
|
| 33 |
+
def record_unknown_question(question):
    """Tool: record a question the assistant could not answer.

    Logs the question locally, forwards it via push notification, and
    returns a small acknowledgement dict for the tool-call protocol.
    """
    print(":: Fn record_unknown_question called ::")
    note = f"Recording {question} asked that I couldn't answer"
    print(note)
    push(note)
    return {"recorded": "ok"}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Json structure for recording user details
|
| 41 |
+
# OpenAI-style function schema: capture a visitor's email (required),
# plus an optional name and free-form conversation notes.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# Json structure for recording unknown questions
|
| 68 |
+
# OpenAI-style function schema: log any question the model could not
# answer, so gaps in the profile can be reviewed later.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
|
| 83 |
+
|
| 84 |
+
# Tool registry handed to the chat-completion call; each schema is
# wrapped in the OpenAI "function" tool envelope.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class PersonalBot():
    """Chatbot persona answering website questions as Surbhit Kumar.

    Wraps Gemini through its OpenAI-compatible endpoint, loads the
    LinkedIn profile text from a local PDF, and drives a tool-calling
    chat loop suitable for gradio's ChatInterface (messages mode).
    """

    def __init__(self):
        # Gemini exposed via the OpenAI-compatible API surface.
        # (The original also bound a redundant local alias `gemini`.)
        self.gemini = OpenAI(
            api_key=os.getenv("GOOGLE_API_KEY"),
            base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
        )
        self.name = "Surbhit Kumar"
        # Concatenate the text of every PDF page; pages with no
        # extractable text are skipped.
        reader = PdfReader('linkedin.pdf')
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:
                self.linkedin += text

    def handle_tool_calls(self, tool_calls):
        """Execute each requested tool and build tool-result messages.

        Tool names are resolved against module globals; an unknown tool
        yields an empty result dict rather than raising.
        """
        print(":: Fn handle_tool_calls called ::")
        print(f":: tool_calls: {tool_calls} ::")
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            print(f"Args for above tool: {arguments}", flush=True)

            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}

            results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
        return results

    def get_system_prompt(self):
        """Assemble the persona system prompt, embedding the LinkedIn text."""
        system_prompt = (
            f"You are acting as {self.name}. You are answering questions on {self.name}'s website, "
            f"particularly questions related to {self.name}'s career, background, skills and experience. "
            f"Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. "
            f"You are given {self.name}'s LinkedIn profile which you can use to answer questions. "
            "Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
            "If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. "
            "If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "
        )

        system_prompt += f"## LinkedIn Profile:\n{self.linkedin}\n\n"
        system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."

        return system_prompt

    def chat(self, message, history):
        """Run one user turn, looping while the model requests tool calls.

        `message`/`history` follow gradio ChatInterface (messages) — the
        history is a list of {"role", "content"} dicts.
        """
        messages = [{"role": "system", "content": self.get_system_prompt()}] + history + [{"role": "user", "content": message}]
        done = False
        while not done:

            response = self.gemini.chat.completions.create(model="gemini-2.0-flash", messages=messages, tools=tools)
            finish_reason = response.choices[0].finish_reason
            print(f"********************* {response} *********************")
            # If the LLM wants to call a tool, we do that!

            if finish_reason == "tool_calls":
                # Use a distinct name: the original rebound the `message`
                # parameter here, shadowing the user's input.
                assistant_message = response.choices[0].message
                results = self.handle_tool_calls(assistant_message.tool_calls)
                messages.append(assistant_message)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
if __name__ == "__main__":
    # Script entry point: build the persona bot and serve the Gradio
    # chat UI (messages-style history) on the default local address.
    bot = PersonalBot()
    gr.ChatInterface(bot.chat, type="messages").launch()
|
linkedin.pdf
ADDED
|
Binary file (53 kB). View file
|
|
|