Haroldmal committed on
Commit
9d1597d
·
verified ·
1 Parent(s): eecc543

Upload folder using huggingface_hub

Browse files
Files changed (7) hide show
  1. .github/workflows/update_space.yml +28 -0
  2. .gitignore +26 -0
  3. README.md +2 -8
  4. app.py +163 -0
  5. linkedin.pdf +0 -0
  6. requirements.txt +6 -0
  7. summary.txt +2 -0
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
name: Run Python script

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      # v2 of checkout/setup-python runs on a removed Node runtime; v4/v5 are the
      # current supported releases and are drop-in compatible here.
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'

      - name: Install Gradio
        run: python -m pip install gradio

      - name: Log in to Hugging Face
        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'

      - name: Deploy to Spaces
        run: gradio deploy
.gitignore ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment variables (API keys, secrets)
2
+ .env
3
+ .env.local
4
+ .env.*.local
5
+
6
+ # Python
7
+ __pycache__/
8
+ *.py[cod]
9
+ *$py.class
10
+ *.so
11
+ .Python
12
+ venv/
13
+ .venv/
14
+ env/
15
+ *.egg-info/
16
+ .eggs/
17
+
18
+ # IDE
19
+ .idea/
20
+ .vscode/
21
+ *.swp
22
+ *.swo
23
+
24
+ # OS
25
+ .DS_Store
26
+ Thumbs.db
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: CV AI CHAT
3
- emoji: 🐢
4
- colorFrom: red
5
- colorTo: purple
6
  sdk: gradio
7
  sdk_version: 6.8.0
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: CV-AI-CHAT
3
+ app_file: app.py
 
 
4
  sdk: gradio
5
  sdk_version: 6.8.0
 
 
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --- Dependencies ---
2
+ from dotenv import load_dotenv
3
+ from openai import OpenAI
4
+ import json
5
+ import os
6
+ import requests
7
+ from pypdf import PdfReader
8
+ import gradio as gr
9
+
10
+ # Load environment variables from .env file (e.g. OPENAI_API_KEY, PUSHOVER_TOKEN)
11
+ load_dotenv(override=True)
12
+
13
+
14
# --- Pushover Integration (mobile notifications) ---
def push(text):
    """Send *text* as a phone notification via the Pushover API.

    Credentials are read from the PUSHOVER_TOKEN / PUSHOVER_USER environment
    variables. Network errors propagate as requests exceptions.
    """
    requests.post(
        "https://api.pushover.net/1/messages.json",
        data={
            "token": os.getenv("PUSHOVER_TOKEN"),
            "user": os.getenv("PUSHOVER_USER"),
            "message": text,
        },
        # requests has NO default timeout; without one an unreachable Pushover
        # endpoint would hang the chat handler forever.
        timeout=10,
    )
25
+
26
+
27
# --- Tool functions (callable by the AI when it decides to) ---
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool: a visitor shared contact info — forward it to the owner's phone."""
    details = f"Recording {name} with email {email} and notes {notes}"
    push(details)
    return {"recorded": "ok"}
32
+
33
+
34
def record_unknown_question(question):
    """Tool: log a question the AI couldn't answer, for later follow-up."""
    note = f"Recording {question}"
    push(note)
    return {"recorded": "ok"}
38
+
39
+
40
# --- Tool schemas (OpenAI function calling format) ---
# JSON Schema telling the model when and how to call record_user_details.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
66
+
67
# JSON Schema telling the model when and how to call record_unknown_question.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
82
+
83
# All tools exposed to the model, wrapped in the OpenAI function-calling envelope.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
]
86
+
87
+
88
# --- Main agent: persona chatbot ---
class Me:
    """Chatbot that answers as Harold Malécot on his website.

    Knowledge base = LinkedIn PDF text + summary.txt; tool calls let the model
    record leads (record_user_details) and unanswered questions
    (record_unknown_question) via the module-level functions of the same name.
    """

    def __init__(self):
        """Connect to OpenAI and load persona knowledge from local files.

        Reads ``linkedin.pdf`` and ``summary.txt`` from the working directory;
        raises if either file is missing.
        """
        self.openai = OpenAI()
        self.name = "Harold Malécot"
        # Concatenate the extracted text of every page of the LinkedIn PDF
        # (extract_text may return None for image-only pages — skip those).
        reader = PdfReader("linkedin.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:
                self.linkedin += text
        # Expose the professional address instead of the personal one.
        self.linkedin = self.linkedin.replace("harold.malecot@proton.me", "harold.job@proton.me")
        # Additional free-form summary (bio, key points).
        with open("summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_calls):
        """Run each tool the model requested; return OpenAI-format tool results.

        Unknown tool names yield an empty result rather than crashing the chat.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            # Resolve the actual Python function by name and call it.
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            # OpenAI expects tool results in this shape to continue the turn.
            results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
        return results

    def system_prompt(self):
        """Build the system prompt that defines the AI's persona and behavior."""
        system_prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, \
particularly questions related to {self.name}'s career, background, skills and experience. \
Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. \
You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. \
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. When sharing your contact email, always use harold.job@proton.me (never use any other email address). "
        # Append the knowledge base so the model can answer accurately.
        system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
        system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
        return system_prompt

    def chat(self, message, history):
        """Gradio callback: call OpenAI, resolve tool calls in a loop, return text.

        Fix: the original rebound the ``message`` parameter to the assistant
        message object inside the loop, silently clobbering the user's input
        binding — a separate local is used now.
        """
        # Full conversation: system prompt + prior turns + latest user message.
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
        while True:
            response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
            choice = response.choices[0]
            if choice.finish_reason != "tool_calls":
                # Model finished with plain text; that's the final reply.
                return choice.message.content
            # Model wants tools: run them, append results, loop for the final reply.
            assistant_msg = choice.message
            messages.append(assistant_msg)
            messages.extend(self.handle_tool_call(assistant_msg.tool_calls))
156
+
157
+
158
# --- Entry point: launch the Gradio chat UI ---
if __name__ == "__main__":
    agent = Me()
    # ChatInterface calls agent.chat(message, history); in Gradio 6 the history
    # arrives as OpenAI-style message dicts by default.
    gr.ChatInterface(agent.chat).launch()
163
+
linkedin.pdf ADDED
Binary file (80.9 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ requests
2
+ python-dotenv
3
+ gradio
4
+ pypdf
5
+ openai
6
+ openai-agents
summary.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ My name is Harold Malécot. I'm an entrepreneur and software engineer. I live in the south of France.
2
+ I love cooking, traveling, entrepreneurship, IT development, and I am passionate about AI and blockchain. I also love the NBA, UFC, and sports in general.