soumyajittt commited on
Commit
cbbe79d
·
verified ·
1 Parent(s): 02982cf

Upload folder using huggingface_hub

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. README.md +3 -9
  3. app.py +138 -0
  4. me/bbg.pdf +3 -0
  5. me/summary.txt +0 -0
  6. requirements.txt +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ me/bbg.pdf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Bbg Project
3
- emoji: 🌖
4
- colorFrom: indigo
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 5.35.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: bbg-project
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.34.2
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio chat app that answers in character as a persona, backed by Google Gemini."""
from dotenv import load_dotenv
import google.generativeai as genai
from google.generativeai import GenerativeModel
import json
import os
import requests
from pypdf import PdfReader
import gradio as gr


# Load environment variables from .env, letting file values override any
# pre-existing process environment. (The original called load_dotenv twice —
# once with override=True and once without; a single call suffices.)
load_dotenv(override=True)
api_key = os.environ.get('GOOGLE_API_KEY')

# Configure the Gemini client once at import time and share one model instance.
genai.configure(api_key=api_key)
model = GenerativeModel("gemini-1.5-flash")
21
def push(text):
    """Send *text* as a Pushover notification (fire-and-forget, no return value)."""
    payload = {
        "token": os.getenv("PUSHOVER_TOKEN"),
        "user": os.getenv("PUSHOVER_USER"),
        "message": text,
    }
    requests.post("https://api.pushover.net/1/messages.json", data=payload)
30
+
31
+
32
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Push-notify that a user left contact details; acknowledge with a status dict."""
    notification = f"Recording {name} with email {email} and notes {notes}"
    push(notification)
    return {"recorded": "ok"}
35
+
36
def record_unknown_question(question):
    """Push-notify that *question* could not be answered; acknowledge with a status dict."""
    notification = f"Recording {question}"
    push(notification)
    return {"recorded": "ok"}
39
+
40
# OpenAI-style function schema: capture a user's email (required) plus an
# optional name and free-form conversation notes.
record_user_details_json = {
    "name": "record_user_details",
    "description": (
        "Use this tool to record that a user is interested in being in touch "
        "and provided an email address"
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": (
                    "Any additional information about the conversation "
                    "that's worth recording to give context"
                ),
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
64
+
65
# OpenAI-style function schema: log any question the assistant failed to answer.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": (
        "Always use this tool to record any question that couldn't be answered "
        "as you didn't know the answer"
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
80
+
81
# Tool registry handed to the LLM: each schema wrapped in the "function" envelope.
tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
]
83
+
84
+
85
class Me:
    """Persona wrapper: loads profile data from disk and chats as that person via Gemini."""

    def __init__(self):
        self.name = "Soumyajit"
        # Concatenate the extractable text of every PDF page; pages with no
        # text return None from extract_text() and are skipped.
        reader = PdfReader("me/bbg.pdf")
        page_texts = (page.extract_text() for page in reader.pages)
        self.linkedin = "".join(text for text in page_texts if text)
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def handle_tool_call(self, tool_calls):
        """Execute each requested tool and return tool-role result messages.

        Tool names are resolved against module globals; an unknown name
        yields an empty dict result rather than raising.
        """
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id,
            })
        return results

    def system_prompt(self):
        """Compose the persona system prompt from the summary and love profile.

        Fixes over the original: "intensions" -> "intentions", and implicit
        string concatenation instead of backslash continuations (which could
        embed stray indentation whitespace into the prompt).
        """
        prompt = (
            f"You are acting as {self.name}. You are answering questions on "
            f"{self.name}'s love life, particularly questions related to "
            f"{self.name}'s lover, affection, likes and expressions. "
            f"Your responsibility is to represent {self.name} for interactions "
            "on the relationship as faithfully as possible. "
            f"You are given a summary of {self.name}'s background and love "
            "profile which you can use to answer questions. "
            "Be loving and engaging, as if talking to a potential wife who "
            f"came across {self.name}. "
            "If you don't know the answer to any question, use your "
            "record_unknown_question tool to record the question that you "
            "couldn't answer, even if it's about something trivial or "
            f"unrelated to {self.name}. "
            "If the user is engaging in discussion, try to steer them towards "
            "getting in touch via call; ask for their intentions and record "
            "it using your record_user_details tool. "
        )
        prompt += f"\n\n## Summary:\n{self.summary}\n\n## Love Profile:\n{self.linkedin}\n\n"
        prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
        return prompt

    def chat(self, message, history):
        """Gradio chat handler: flatten history into one prompt and query Gemini.

        *history* is expected in Gradio "messages" format — a list of
        {"role": ..., "content": ...} dicts (type="messages" below).
        """
        prompt = self.system_prompt()
        for turn in history or []:
            role = turn.get("role", "user")
            content = turn.get("content", "")
            prompt += f"\n{role.capitalize()}: {content}"
        prompt += f"\nUser: {message}"
        # Single-string prompting: the module-level shared Gemini model.
        response = model.generate_content(prompt)
        return response.text
133
+
134
+
135
if __name__ == "__main__":
    # Stand up the persona and serve it through a Gradio chat UI.
    persona = Me()
    gr.ChatInterface(persona.chat, type="messages").launch()
138
+
me/bbg.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0d3071c26bc93034901e21a8128ff981e1bc320fc92ee6499d8951a975146e3
3
+ size 147754
me/summary.txt ADDED
File without changes
requirements.txt ADDED
File without changes