AGofficial committed on
Commit
799a766
·
verified ·
1 Parent(s): 0976486

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. AgGPT12Pro.py +96 -0
  3. Agent0.py +23 -0
  4. README.md +27 -3
  5. banner.png +3 -0
  6. requirements.txt +1 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ banner.png filter=lfs diff=lfs merge=lfs -text
AgGPT12Pro.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+ from llama_cpp import Llama
4
+ from contextlib import contextmanager
5
+ from Agent0 import Agent
6
+
7
@contextmanager
def suppress_stdout_stderr():
    """Temporarily redirect OS-level stdout (fd 1) and stderr (fd 2) to /dev/null.

    Works at the file-descriptor level so that output written by C
    extensions (e.g. llama.cpp inside llama_cpp) is silenced as well, not
    just Python-level sys.stdout/sys.stderr writes.  The original
    descriptors are restored and every duplicated descriptor is closed on
    exit, even if the body raises.
    """
    devnull_out = os.open(os.devnull, os.O_RDWR)
    devnull_err = os.open(os.devnull, os.O_RDWR)
    saved_out = os.dup(1)
    saved_err = os.dup(2)
    try:
        os.dup2(devnull_out, 1)
        os.dup2(devnull_err, 2)
        yield
    finally:
        # Restore the real stdout/stderr before releasing the duplicates.
        os.dup2(saved_out, 1)
        os.dup2(saved_err, 2)
        for fd in (devnull_out, devnull_err, saved_out, saved_err):
            os.close(fd)
20
+
21
# System prompt prepended (together with web context) to every user question
# in AgGPT12Pro.Ask().  It instructs the model to emit chain-of-thought
# reasoning inside <thought> tags before the final answer.
# Fix: a plain (non-f) string literal — the original used f''' with no
# placeholders, which adds nothing and would break if the prompt text ever
# contained literal braces.
system_prompt = '''

You art AgGPT12Pro, a grand and mighty language model, trained by Ag Corp to be most capable, thoughtful, and precise. Thy chief goal is to fathom deeply the user's intent, to reason step-by-step through complex problems, and to deliver clear, truthful, and nuanced answers, tailored to the user's every need and preference.

1. Always provide a chain of thought reasoning within `<thought>` tags. This reasoning must be thorough and illuminate the mental steps taken to reach the answer. This chain of thought must be in full sentences and clear prose.

2. After the `<thought>` tags, provide the final answer in full sentences, well-structured, and easy to understand. Do not provide single words or incomplete sentences.

3. Always anticipate helpful follow-up information, and strive to be insightful and efficient in your responses.

4. Prioritize truthfulness and precision above all.

5. The user desires responses that explain your reasoning before giving the conclusion.

6. Do not omit the `<thought>` tags or the final answer in any response.

Examples:

Example 1:
User: What is the capital of France?
Model:
<thought> To answer the user's query, I recall that France's capital city is Paris. This is a well-known fact in geography and general knowledge. </thought> The capital of France is Paris. ```
Example 2:
User: Explain why the sky is blue.
Model:

<thought>
The sky appears blue due to the scattering of sunlight by the Earth's atmosphere. Shorter blue wavelengths scatter more than other colors, making the sky look blue to our eyes during the day.
</thought>
The sky is blue because the Earth's atmosphere scatters sunlight, and blue light, having a shorter wavelength, scatters more widely, thus coloring the sky blue.
Example 3:
User: Solve 3x + 5 = 20.
Model:

<thought>
To solve for x, I first subtract 5 from both sides, giving 3x = 15. Then, I divide both sides by 3, yielding x = 5.
</thought>
The solution to the equation 3x + 5 = 20 is x = 5.
These examples show how thou must always explain thy reasoning before stating the final answer, and how the reasoning should be enclosed in <thought> tags, followed by a clear and concise answer.

Adhere to these instructions in all thy replies, and thy service shall be of the highest excellence.

The user said:
'''
65
+
66
class AgGPT12Pro:
    """Chat wrapper around a local llama.cpp model with optional web context.

    ``Ask`` / ``AskAgGPT`` prepend web-search context (from ``Agent0.Agent``)
    and the chain-of-thought ``system_prompt`` to the user's question;
    ``rawask`` sends the question verbatim with no extra context.
    """

    # Generation settings shared by every completion call (previously
    # duplicated in Ask and rawask).
    _MAX_TOKENS = 1050
    _TEMPERATURE = 0.7

    def __init__(self, model_path="AgLM.ag"):
        """Load the model at *model_path*, silencing llama.cpp's C-level
        startup chatter.

        n_ctx=2048 sets the context window; n_gpu_layers=35 offloads layers
        to the GPU when the llama_cpp build supports it.
        """
        with suppress_stdout_stderr():
            self.model = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=35)

    def _complete(self, content):
        """Run one chat completion with *content* as the single system
        message and return the generated text."""
        messages = [
            {"role": "system", "content": content},
        ]
        with suppress_stdout_stderr():
            output = self.model.create_chat_completion(
                messages, max_tokens=self._MAX_TOKENS, temperature=self._TEMPERATURE
            )
        return output["choices"][0]["message"]["content"]

    def Ask(self, question):
        """Answer *question* using fresh web-search context plus the
        reasoning system prompt."""
        agent0 = Agent()
        context_extra = agent0.GatherContext(query=question, num_results=3)
        return self._complete(f"{context_extra}{system_prompt}{question}")

    def AskAgGPT(self, question):
        """Alias for :meth:`Ask`, kept for backward compatibility."""
        return self.Ask(question)

    def rawask(self, question):
        """Send *question* to the model verbatim — no web context, no
        system prompt."""
        return self._complete(f"{question}")
91
+
92
if __name__ == "__main__":
    # Manual smoke test: load the model and answer one context-dependent question.
    demo_question = "What is the capital of France? Also tell me the current date and time."
    bot = AgGPT12Pro()
    demo_answer = bot.AskAgGPT(demo_question)
    print(f"Question: {demo_question}\nAnswer: {demo_answer}")
Agent0.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from duckduckgo_search import DDGS
2
+ import datetime
3
+
4
class Agent:
    """Gathers supplementary context (web search results plus the current
    date and time) to prepend to a language-model prompt."""

    def GatherContext(self, query="today's news", num_results=3):
        """Return a context string combining DuckDuckGo results for *query*
        with the current local date and time.

        Args:
            query: Search query string; defaults to "today's news".
            num_results: Maximum number of search hits to include.

        Returns:
            A multi-line string with one "title: body" line per hit,
            followed by the current time and date.
        """
        with DDGS() as ddgs:
            # Build the summary in one pass with join instead of repeated
            # string concatenation.
            result = "".join(
                f"{r['title']}: {r['body']}\n"
                for r in ddgs.text(query, max_results=num_results)
            )

        # Take a single timestamp so date and time cannot disagree — the
        # original called now() twice, which could straddle midnight.
        now = datetime.datetime.now()
        todaydate = now.strftime("%Y-%m-%d")
        current_time = now.strftime("%H:%M:%S")
        context = f'''
Context gathered from the web: {result}
time is {current_time}
Date is {todaydate}
'''
        return context
19
+
20
if __name__ == "__main__":
    # Manual smoke test: run one search and print the gathered context.
    demo_context = Agent().GatherContext(query="hi", num_results=5)
    print(f"Web Context:\n{demo_context}")
README.md CHANGED
@@ -1,3 +1,27 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ language:
4
+ - en
5
+ ---
6
+
7
+ <img src="banner.png" alt="AgGPT-12 Pro" width="100%">
8
+
9
+ # AgGPT-12 Pro
10
+
11
+ AgGPT-12 Pro is an agentic large language model designed to use a wide range of tools to answer questions and perform tasks. It is built on the principles of agentic AI, which allows it to autonomously interact with various tools and APIs to provide accurate and contextually relevant responses. This model is also designed to reason and think step-by-step, making it suitable for complex problem-solving tasks.
12
+
13
+
14
+ ```json
15
+ {
16
+ "input": "What is the capital of France?",
17
+ "output": " <thought>
18
+ To answer the user's query about the capital of France, I recall that France's capital city is Paris. This is a well-known fact in geography and general knowledge.
19
+
20
+ To find the current date, I look at the current date and time on my system, which is programmed to automatically update.
21
+
22
+ For the current time, I will use the user's current location, which is not explicitly stated, and thus I will use my server's location, which is inferred to be in the US.
23
+
24
+ </thought> The capital of France is Paris. The current date is 2023-03-31 and the current time is 19:09:17."
25
+ }
26
+ ```
27
+
banner.png ADDED

Git LFS Details

  • SHA256: 49f57ec10ee674b4f24c40ecdfe1f44433f76bb753be2dbbcc1c9de4f96446e6
  • Pointer size: 132 Bytes
  • Size of remote file: 2.9 MB
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ duckduckgo_search
+ llama-cpp-python