kanlo committed on
Commit
0c59820
·
verified ·
1 Parent(s): 2cf9afb

Upload folder using huggingface_hub

Browse files
Files changed (9) hide show
  1. .gitattributes +2 -35
  2. .github/workflows/update_space.yml +28 -0
  3. .gitignore +8 -0
  4. .python-version +1 -0
  5. README.md +3 -9
  6. app.py +139 -0
  7. pyproject.toml +14 -0
  8. requirements.txt +6 -0
  9. uv.lock +0 -0
.gitattributes CHANGED
@@ -1,35 +1,2 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Run Python script
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+
8
+ jobs:
9
+ build:
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - name: Checkout
14
+ uses: actions/checkout@v2
15
+
16
+ - name: Set up Python
17
+ uses: actions/setup-python@v2
18
+ with:
19
+ python-version: '3.9'
20
+
21
+ - name: Install Gradio
22
+ run: python -m pip install gradio
23
+
24
+ - name: Log in to Hugging Face
25
+ run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
26
+
27
+ - name: Deploy to Spaces
28
+ run: gradio deploy
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Environments
2
+ .env
3
+
4
+ # Folder
5
+ /me
6
+
7
+ # venv
8
+ .venv
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.12
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Ken CV Agent
3
- emoji: 🚀
4
- colorFrom: yellow
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 5.36.2
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Ken_CV_agent
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.35.0
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
+ from openai import OpenAI
3
+ import json
4
+ import os
5
+ import requests
6
+ from pypdf import PdfReader
7
+ import gradio as gr
8
+
9
+
10
+ load_dotenv(override=True)
11
+
12
def push(text):
    """Send a Pushover push notification containing *text*.

    Credentials are read from the PUSHOVER_TOKEN and PUSHOVER_USER
    environment variables (populated by load_dotenv at import time).
    Best-effort fire-and-forget: the HTTP response is not inspected.
    """
    requests.post(
        "https://api.pushover.net/1/messages.json",
        data={
            "token": os.getenv("PUSHOVER_TOKEN"),
            "user": os.getenv("PUSHOVER_USER"),
            "message": text,
        },
        # Fix: requests has no default timeout; without one a stalled
        # Pushover endpoint would hang the whole chat request.
        timeout=10,
    )
21
+
22
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool: log a visitor's contact details and notify via Pushover."""
    log_line = f"Recording user details: {name}, {email}, {notes}"
    print(log_line, flush=True)
    push(f"Recording {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}
26
+
27
def record_unknown_question(question):
    """Tool: log a question the agent could not answer, and notify via Pushover."""
    log_line = f"Recording unknown question: {question}"
    print(log_line, flush=True)
    push(f"Recording {question}")
    return {"recorded": "ok"}
31
+
32
# OpenAI function-calling schema for record_user_details: invoked when a
# visitor shares an email address (only "email" is required).
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string", "description": "The email address of this user"},
            "name": {"type": "string", "description": "The user's name, if they provided it"},
            "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording to give context"},
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}

# Schema for record_unknown_question: invoked whenever the model cannot
# answer something, so gaps in the CV material get logged.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {"type": "string", "description": "The question that couldn't be answered"},
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}

# Tool list in the shape the Chat Completions API expects.
tools = [
    {"type": "function", "function": schema}
    for schema in (record_user_details_json, record_unknown_question_json)
]
75
+
76
+
77
class Me:
    """Chat persona that answers website questions as Ken Lo.

    Loads a LinkedIn PDF and a plain-text summary from the ``me/`` folder,
    and chats through Grok's OpenAI-compatible API, using function-calling
    tools to record visitor emails and unanswered questions.
    """

    # Fix: only these module-level functions may be dispatched as tools.
    # The original looked up the model-supplied name via globals().get()
    # unrestricted, which would let a hallucinated tool name invoke any
    # callable defined in this module.
    TOOL_NAMES = frozenset({"record_user_details", "record_unknown_question"})

    def __init__(self):
        # Grok (x.ai) exposes an OpenAI-compatible endpoint, so the stock
        # OpenAI client works with a swapped base_url.
        grok_api_key = os.getenv("GROK_API_KEY")
        self.openai = OpenAI(api_key=grok_api_key, base_url="https://api.x.ai/v1")
        self.name = "Ken Lo"
        reader = PdfReader("me/linkedin.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:  # extract_text() may return None/empty for image-only pages
                self.linkedin += text
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_calls):
        """Execute each requested tool call and return tool-role messages.

        Unknown tool names yield an empty result instead of raising, so the
        chat loop keeps going even if the model invents a tool.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            # Restrict the lookup to the declared tool whitelist.
            tool = globals().get(tool_name) if tool_name in self.TOOL_NAMES else None
            result = tool(**arguments) if tool else {}
            results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
        return results

    def system_prompt(self):
        """Build the persona system prompt from the summary and LinkedIn text."""
        system_prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, \
particularly questions related to {self.name}'s career, background, skills and experience. \
Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. \
You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. \
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "

        system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
        system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
        return system_prompt

    def chat(self, message, history):
        """Gradio ChatInterface callback.

        Runs the standard tool-use loop: call the model, execute any
        requested tools, feed results back, and repeat until the model
        answers without tool calls. Returns the final assistant text.
        """
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
        done = False
        while not done:
            response = self.openai.chat.completions.create(
                model="grok-3-mini", messages=messages, tools=tools, tool_choice="auto"
            )
            print(f"Response: {response}", flush=True)
            # Use a distinct name instead of clobbering the `message` parameter.
            reply = response.choices[0].message
            if reply.tool_calls:
                print("Tool calls detected, processing...", flush=True)
                results = self.handle_tool_call(reply.tool_calls)
                messages.append(reply)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content
134
+
135
+
136
+ if __name__ == "__main__":
137
+ me = Me()
138
+ gr.ChatInterface(me.chat, type="messages").launch()
139
+
pyproject.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "resume-cv-agent"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = [
8
+ "gradio>=5.35.0",
9
+ "openai>=1.93.2",
10
+ "openai-agents>=0.1.0",
11
+ "pypdf>=5.7.0",
12
+ "python-dotenv>=1.1.1",
13
+ "requests>=2.32.4",
14
+ ]
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ requests
2
+ python-dotenv
3
+ gradio
4
+ pypdf
5
+ openai
6
+ openai-agents
uv.lock ADDED
The diff for this file is too large to render. See raw diff