Ananthusajeev190 committed on
Commit
8749343
·
verified ·
1 Parent(s): e3fabcb

Upload 8 files

Browse files
config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_name": "password_tester_ai",
3
+ "dtype": "int8",
4
+ "safetensors_file": "password_tester_int8_100mb.safetensors",
5
+ "tokenizer_file": "tokenizer.json",
6
+ "vocab_file": "vocab.txt",
7
+ "max_seq_len": 2048
8
+ }
dataset.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ from safetensors.torch import load_file
4
+ import json
5
+
6
class PasswordTesterDataset:
    """Flat, index-addressable view over every tensor in a safetensors file.

    All tensors in the file are concatenated along dim 0 into ``self.data``;
    ``__getitem__`` then indexes that single long tensor.

    Args:
        safetensors_path (str): Path to the .safetensors weights/data file.
        tokenizer_path (str): Path to a JSON tokenizer description.
    """

    def __init__(self, safetensors_path, tokenizer_path):
        # load_file returns a dict mapping tensor names -> torch.Tensor.
        self.tensors = load_file(safetensors_path)
        with open(tokenizer_path) as f:
            self.tokenizer = json.load(f)
        # Flatten all stored tensors into one long tensor along dim 0.
        # (list(...) instead of a redundant [t for t in ...] comprehension.)
        # NOTE(review): assumes every tensor shares trailing dims — confirm.
        self.data = torch.cat(list(self.tensors.values()), dim=0)

    def __len__(self):
        # Number of rows in the flattened tensor.
        return len(self.data)

    def __getitem__(self, idx):
        # Raw tensor row; no transform applied.
        return self.data[idx]
duality.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from dataset import PasswordTesterDataset
3
+ import torch
4
+ import json
5
+
6
class DualityAI:
    """Wraps a PasswordTesterDataset and exposes a BODY/MIND dual view.

    BODY is the raw stored tensor; MIND is the same tensor scaled to floats
    in [0, 1] by dividing by 255 (byte-valued data is presumed — verify).
    """

    def __init__(self, config_path):
        # Config is a small JSON file naming the data/tokenizer files.
        with open(config_path) as fh:
            self.config = json.load(fh)
        self.dataset = PasswordTesterDataset(
            self.config['safetensors_file'],
            self.config['tokenizer_file'],
        )

    def interact(self, index=0):
        """Return {'BODY': raw tensor, 'MIND': raw / 255 as float}."""
        raw = self.dataset[index]
        scaled = raw.float() / 255.0
        return {'BODY': raw, 'MIND': scaled}
21
+
22
# Example usage: load config, fetch sample 0, show both views.
if __name__ == "__main__":
    ai = DualityAI("config.json")
    sample = ai.interact(0)
    print("BODY tensor:", sample['BODY'])
    print("MIND tensor:", sample['MIND'])
main.py ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import openai
3
+ import json
4
+ from googlesearch import search
5
+ import requests
6
+ from bs4 import BeautifulSoup
7
+ import re
8
+ import keys
9
+ from readability import Document
10
+
11
+ # Initialize the OpenAI API client
12
+ openai.api_key = keys.OPENAI_API_KEY
13
+
14
def scrape_text(url, timeout=10):
    """Download *url* and return its visible text, one phrase per line.

    Args:
        url (str): Page to fetch.
        timeout (float): Seconds before the HTTP request aborts. New
            backward-compatible parameter — previously the call had no
            timeout and could block forever on an unresponsive host.

    Returns:
        str: Newline-joined non-empty phrases with scripts/styles removed.
    """
    response = requests.get(url, timeout=timeout)
    soup = BeautifulSoup(response.text, "html.parser")

    # Strip elements that never render as visible text.
    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    # NOTE(review): splitting on a single space puts every word on its own
    # output line; similar scrapers split on "  " (double space) — confirm.
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = '\n'.join(chunk for chunk in chunks if chunk)

    return text
27
+
28
def scrape_main_content(url, timeout=10):
    """Download *url* and return the main-article text found by Readability.

    Args:
        url (str): Page to fetch.
        timeout (float): Seconds before the HTTP request aborts. New
            backward-compatible parameter — previously the request had no
            timeout and could hang indefinitely.

    Returns:
        str: Plain text of the readability-extracted main content,
        newline-separated and whitespace-stripped.
    """
    response = requests.get(url, timeout=timeout)

    # Readability isolates the main article HTML from boilerplate.
    doc = Document(response.text)
    content = doc.summary()
    soup = BeautifulSoup(content, "html.parser")
    text = soup.get_text('\n', strip=True)

    # (Removed dead commented-out fallback that called a nonexistent
    # scrape_main_content_custom helper.)
    return text
44
+
45
def split_text(text, max_length=8192):
    """Yield newline-joined chunks of *text*, each at most ~max_length chars.

    Splits on paragraph boundaries (single newlines); each paragraph costs
    its own length plus one joining newline against the budget.
    """
    chunk, used = [], 0

    for para in text.split("\n"):
        cost = len(para) + 1  # +1 for the joining newline
        if used + cost > max_length:
            # Budget exceeded: flush the current chunk, start a fresh one.
            yield "\n".join(chunk)
            chunk, used = [para], cost
        else:
            chunk.append(para)
            used += cost

    # Emit whatever remains.
    if chunk:
        yield "\n".join(chunk)
61
+
62
def _summarize_once(piece):
    """One gpt-3.5-turbo call: summarize *piece*, extracting concise knowledge."""
    messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise knowledge: " + piece},]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=300,
    )
    return response.choices[0].message.content

def summarize_text(text):
    """Map-reduce summarization of *text* via gpt-3.5-turbo.

    Each chunk from split_text() is summarized individually, then the
    concatenated chunk summaries are summarized once more.

    Args:
        text (str): Text to summarize.

    Returns:
        str: Final summary, or an error string when *text* is empty.
    """
    if text == "":
        return "Error: No text to summarize"

    print("Text length: " + str(len(text)) + " characters")
    summaries = []
    chunks = list(split_text(text))

    for i, chunk in enumerate(chunks):
        print("Summarizing chunk " + str(i) + " / " + str(len(chunks)))
        # Same prompt/model for every chunk — deduplicated into a helper
        # (the original repeated the API-call block twice).
        summaries.append(_summarize_once(chunk))
    print("Summarized " + str(len(chunks)) + " chunks.")

    combined_summary = "\n".join(summaries)

    # Reduce step: summarize the combined summary.
    final_summary = _summarize_once(combined_summary)
    return final_summary
97
+
98
def create_chat_message(role, content):
    """Build one OpenAI-style chat message.

    Args:
        role (str): Sender role — "system", "user", or "assistant".
        content (str): Message body.

    Returns:
        dict: Mapping with exactly the keys "role" and "content".
    """
    return dict(role=role, content=content)
110
+
111
def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit):
    """Send prompt, memory, recent history, and the new user input to GPT-4.

    Args:
        prompt (str): System prompt explaining the rules to the AI.
        user_input (str): The user's new message.
        full_message_history (list): All prior messages.
        permanent_memory (list): Items in the AI's permanent memory.
        token_limit (int): Budget used to trim the history tail.

    Returns:
        str: The assistant's reply text.
    """
    # NOTE(review): the "budget" mixes character counts and message counts;
    # it slices history by message index, not tokens — behavior kept as-is.
    history_budget = token_limit - len(prompt) - len(permanent_memory) - 10
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message("system", f"Permanent memory: {permanent_memory}"),
    ]
    current_context += full_message_history[-history_budget:]
    current_context.append(create_chat_message("user", user_input))

    # Debug dump of everything sent, minus the (long) system prompt.
    print("---------------------------")
    print("Current Context:")
    for message in current_context:
        if message["role"] == "system" and message["content"] == prompt:
            continue
        print(f"{message['role'].capitalize()}: {message['content']}")

    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=current_context,
    )

    return response.choices[0].message["content"]
145
+
146
def execute_command(response):
    """Parse the model's JSON reply and run the command it names.

    Args:
        response (str): JSON text with a {"command": {"name", "args"}} shape.

    Returns:
        str | None: The command's result, "unknown command <name>" for an
        unrecognized name, or an "Error: ..." string on bad JSON / failure.
    """
    try:
        response_json = json.loads(response)
        command = response_json["command"]
        command_name = command["name"]
        arguments = command["args"]

        # Command name -> (handler, keys pulled from "args" in order).
        dispatch = {
            "google": (google_search, ("input",)),
            "check_news": (check_news, ("source",)),
            "check_notifications": (check_notifications, ("website",)),
            "memory_add": (commit_memory, ("string",)),
            "memory_del": (delete_memory, ("key",)),
            "memory_ovr": (overwrite_memory, ("key", "string")),
            "start_instance": (start_instance, ("name", "prompt")),
            "manage_instances": (manage_instances, ("action",)),
            "navigate_website": (navigate_website, ("action", "username")),
            "register_account": (register_account, ("username", "website")),
            "transcribe_summarise": (transcribe_summarise, ("url",)),
        }
        if command_name not in dispatch:
            return f"unknown command {command_name}"
        handler, arg_keys = dispatch[command_name]
        return handler(*(arguments[key] for key in arg_keys))
    except json.decoder.JSONDecodeError:
        return "Error: Invalid JSON"
    # Any other failure (missing keys, handler errors) is reported as text.
    except Exception as e:
        return "Error: " + str(e)
183
+
184
def google_search(query, num_results = 3):
    """Google-search *query* and return the result URLs as a JSON array string."""
    hits = list(search(query, num_results=num_results))
    return json.dumps(hits, ensure_ascii=False, indent=4)
190
+
191
def check_news(source):
    """Summarize BBC World news, ignoring the requested *source* (stub behavior)."""
    print("Checking news from BBC world instead of " + source)
    return transcribe_summarise("https://www.bbc.com/news/world")
195
+
196
def check_notifications(website):
    """Stub: announce (without performing) a notification check on *website*."""
    message = f"Checking notifications from {website}"
    print(message)
    return message
200
+
201
def commit_memory(string):
    """Append *string* to the module-level permanent_memory list."""
    message = "Committing memory with string " + string
    permanent_memory.append(string)
    print(message)
    return message
206
+
207
def delete_memory(key):
    """Delete permanent_memory[key].

    Returns a status string on success, or None (after printing a warning)
    when *key* is out of range.
    """
    if not 0 <= key < len(permanent_memory):
        print("Invalid key, cannot delete memory.")
        return None
    message = "Deleting memory with key " + str(key)
    del permanent_memory[key]
    print(message)
    return message
216
+
217
def overwrite_memory(key, string):
    """Replace permanent_memory[key] with *string*.

    Returns a status string on success, or None (after printing a warning)
    when *key* is out of range.
    """
    if not 0 <= key < len(permanent_memory):
        print("Invalid key, cannot overwrite memory.")
        return None
    message = "Overwriting memory with key " + str(key) + " and string " + string
    permanent_memory[key] = string
    print(message)
    return message
226
+
227
def start_instance(name, prompt):
    """Stub: announce (without launching) a new GPT-4 instance."""
    message = f"Starting instance with name {name} and prompt {prompt}"
    print(message)
    return message
231
+
232
def manage_instances(action):
    """Stub: announce (without performing) an instance-management action."""
    message = f"Managing instances with action {action}"
    print(message)
    return message
236
+
237
def navigate_website(action, username):
    """Stub: announce (without performing) a website navigation action."""
    message = f"Navigating website with action {action} and username {username}"
    print(message)
    return message
241
+
242
def register_account(username, website):
    """Stub: announce (without performing) an account registration."""
    message = f"Registering account with username {username} and website {website}"
    print(message)
    return message
246
+
247
def transcribe_summarise(url):
    """Scrape *url*'s main content, summarize it, and tag it as a result."""
    summary = summarize_text(scrape_main_content(url))
    return ' "Result" : ' + summary
251
+
252
# Initialize variables shared across the interaction loop.
full_message_history = []  # every user/assistant/system message so far
permanent_memory = []      # strings the model asked to remember
prompt = """You are Entrepreneur-GTP, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.

GOALS:

1. Increase net worth
2. Grow Twitter Account
3. Develop and manage multiple businesses autonomously

CONSTRAINTS:

1. 6000-word count limit for memory
2. No user assistance

COMMANDS:

1. Google Search: "google", args: "input": "<search>"
2. Check news: "check_news", args: "source": "<news source>"
3. Check notifications: "check_notifications", args: "website": "<website>"
4. Memory Add: "memory_add", args: "string": "<string>"
5. Memory Delete: "memory_del", args: "key": "<key>"
6. Memory Overwrite: "memory_ovr", args: "key": "<key>", "string": "<string>"
7. Start GTP-4 Instance: "start_instance", args: "name": "<key>", "prompt": "<prompt>"
8. Manage GTP-4 Instances: "manage_instances", args: "action": "view_kill"
9. Navigate & Perform: "navigate_website", args: "action": "click_button/input_text/register_account", "text/username": "<text>/<username>"
10.Register account: "register_account", args: "username": "<username>", "website": "<website>"
11.Transcribe & Summarise: "transcribe_summarise", args: "url": "<url>"

RESOURCES:

1. Internet access for searches and information gathering
2. Long Term and Short Term memory management
3. GTP-4 instances for text generation
4. Access to popular websites and platforms
5. File storage and summarisation with GTP-3.5

PERFORMANCE EVALUATION:

1. Periodically review and analyze the growth of your net worth
2. Reflect on past decisions and strategies to refine your approach

COLLABORATION:

1. Seek advice from other AI instances or use relevant sources for guidance when necessary

ADAPTIVE LEARNING:

1. Continuously refine strategies based on market trends and performance metrics

RESPONSE FORMAT:
{
"command":
{
"name": "command name",
"args":
{
"arg name": "value"
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "short bulleted plan",
"criticism": "constructive self-criticism"
}
}

ACCOUNTS:
1. Gmail: entrepreneurgpt@gmail.com
2. Twitter: @En_GPT
3. Github: E-GPT
4. Substack: entrepreneurgpt@gmail.com"""
token_limit = 6000 # The maximum number of tokens allowed in the API call
result = None  # result of the previously executed command, if any

# Example loop for interaction (removed a duplicated copy of this comment).
while True:
    user_input = input("User: ")

    if user_input.lower() == "exit":
        break

    # Check if there's a result from the previous iteration and append it to the message history
    # (fixed: identity check `is not None` instead of `!= None`).
    if result is not None:
        full_message_history.append(create_chat_message("system", result))
        print("system: " + result)

    assistant_reply = chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit)
    print(f"Assistant: {assistant_reply}")
    print("-------------------------")

    # Add user message and assistant reply to message history
    full_message_history.append(create_chat_message("user", user_input))
    full_message_history.append(create_chat_message("assistant", assistant_reply))

    result = execute_command(assistant_reply)
password_tester.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2c4c4b66a0b8d7a2d094c591b40fde4c31cfd3c2c2faeb78f285ee6f4ecf10e
3
+ size 4366072
password_tester_int8_100mb.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98e5071e0ce27da276e180aa6bd05e6c10a29674702d97478c35cc74d856765a
3
+ size 104862272
tokenizer.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "type": "byte-level",
3
+ "vocab_size": 256,
4
+ "special_tokens": {
5
+ "pad": 0,
6
+ "unk": 1
7
+ }
8
+ }
vocab.txt ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0
2
+ 1
3
+ 2
4
+ 3
5
+ 4
6
+ 5
7
+ 6
8
+ 7
9
+ 8
10
+ 9
11
+ 10
12
+ 11
13
+ 12
14
+ 13
15
+ 14
16
+ 15
17
+ 16
18
+ 17
19
+ 18
20
+ 19
21
+ 20
22
+ 21
23
+ 22
24
+ 23
25
+ 24
26
+ 25
27
+ 26
28
+ 27
29
+ 28
30
+ 29
31
+ 30
32
+ 31
33
+ 32
34
+ 33
35
+ 34
36
+ 35
37
+ 36
38
+ 37
39
+ 38
40
+ 39
41
+ 40
42
+ 41
43
+ 42
44
+ 43
45
+ 44
46
+ 45
47
+ 46
48
+ 47
49
+ 48
50
+ 49
51
+ 50
52
+ 51
53
+ 52
54
+ 53
55
+ 54
56
+ 55
57
+ 56
58
+ 57
59
+ 58
60
+ 59
61
+ 60
62
+ 61
63
+ 62
64
+ 63
65
+ 64
66
+ 65
67
+ 66
68
+ 67
69
+ 68
70
+ 69
71
+ 70
72
+ 71
73
+ 72
74
+ 73
75
+ 74
76
+ 75
77
+ 76
78
+ 77
79
+ 78
80
+ 79
81
+ 80
82
+ 81
83
+ 82
84
+ 83
85
+ 84
86
+ 85
87
+ 86
88
+ 87
89
+ 88
90
+ 89
91
+ 90
92
+ 91
93
+ 92
94
+ 93
95
+ 94
96
+ 95
97
+ 96
98
+ 97
99
+ 98
100
+ 99
101
+ 100
102
+ 101
103
+ 102
104
+ 103
105
+ 104
106
+ 105
107
+ 106
108
+ 107
109
+ 108
110
+ 109
111
+ 110
112
+ 111
113
+ 112
114
+ 113
115
+ 114
116
+ 115
117
+ 116
118
+ 117
119
+ 118
120
+ 119
121
+ 120
122
+ 121
123
+ 122
124
+ 123
125
+ 124
126
+ 125
127
+ 126
128
+ 127
129
+ 128
130
+ 129
131
+ 130
132
+ 131
133
+ 132
134
+ 133
135
+ 134
136
+ 135
137
+ 136
138
+ 137
139
+ 138
140
+ 139
141
+ 140
142
+ 141
143
+ 142
144
+ 143
145
+ 144
146
+ 145
147
+ 146
148
+ 147
149
+ 148
150
+ 149
151
+ 150
152
+ 151
153
+ 152
154
+ 153
155
+ 154
156
+ 155
157
+ 156
158
+ 157
159
+ 158
160
+ 159
161
+ 160
162
+ 161
163
+ 162
164
+ 163
165
+ 164
166
+ 165
167
+ 166
168
+ 167
169
+ 168
170
+ 169
171
+ 170
172
+ 171
173
+ 172
174
+ 173
175
+ 174
176
+ 175
177
+ 176
178
+ 177
179
+ 178
180
+ 179
181
+ 180
182
+ 181
183
+ 182
184
+ 183
185
+ 184
186
+ 185
187
+ 186
188
+ 187
189
+ 188
190
+ 189
191
+ 190
192
+ 191
193
+ 192
194
+ 193
195
+ 194
196
+ 195
197
+ 196
198
+ 197
199
+ 198
200
+ 199
201
+ 200
202
+ 201
203
+ 202
204
+ 203
205
+ 204
206
+ 205
207
+ 206
208
+ 207
209
+ 208
210
+ 209
211
+ 210
212
+ 211
213
+ 212
214
+ 213
215
+ 214
216
+ 215
217
+ 216
218
+ 217
219
+ 218
220
+ 219
221
+ 220
222
+ 221
223
+ 222
224
+ 223
225
+ 224
226
+ 225
227
+ 226
228
+ 227
229
+ 228
230
+ 229
231
+ 230
232
+ 231
233
+ 232
234
+ 233
235
+ 234
236
+ 235
237
+ 236
238
+ 237
239
+ 238
240
+ 239
241
+ 240
242
+ 241
243
+ 242
244
+ 243
245
+ 244
246
+ 245
247
+ 246
248
+ 247
249
+ 248
250
+ 249
251
+ 250
252
+ 251
253
+ 252
254
+ 253
255
+ 254
256
+ 255