| from llama_cpp import Llama | |
class AgGPT:
    """Interactive chat wrapper around a local llama.cpp GGUF model.

    Wraps ``llama_cpp.Llama`` with a simple stdin REPL (``run``) and a
    programmatic one-shot query method (``ask``).
    """

    def __init__(self, model_path):
        """Load the GGUF model at *model_path*.

        Args:
            model_path: Filesystem path to a ``.gguf`` model file.
        """
        self.model_path = model_path
        # NOTE(review): n_ctx=4048 looks like a typo for the usual 4096 —
        # confirm the intended context window before changing it.
        self.model = Llama(model_path=model_path, n_ctx=4048, n_gpu_layers=35)

    def run(self):
        """Read prompts from stdin in a loop and print the model's replies.

        Exits cleanly on EOF (Ctrl-D) or KeyboardInterrupt (Ctrl-C)
        instead of dying with a traceback.
        """
        while True:
            try:
                prompt = input("\nEnter your prompt: ")
            except (EOFError, KeyboardInterrupt):
                break
            # run() previously duplicated ask()'s body verbatim; delegate
            # to the single implementation so the two can't drift apart.
            print(self.ask(prompt))

    def ask(self, question):
        """Ask AgGPT-10 a question and return the answer.

        Args:
            question: The user's question, injected both into the system
                prompt and as the user message (as in the original design).

        Returns:
            The assistant message content from the first completion choice.
        """
        messages = [
            {"role": "system", "content": f"You are AgGPT-10, an AGI system. The user said: '{question}'."},
            {"role": "user", "content": question}
        ]
        # NOTE(review): max_tokens=2050 is an unusual limit — confirm intent.
        output = self.model.create_chat_completion(messages, max_tokens=2050, temperature=0.7)
        return output["choices"][0]["message"]["content"]
if __name__ == "__main__":
    # Script entry point: load the bundled GGUF model and start the REPL.
    AgGPT("agent.gguf").run()