| """Agnet Service""" |
| import time |
|
|
| from openai.error import RateLimitError |
|
|
| from Brain.src.common.utils import AGENT_NAME, DEFAULT_GPT_MODEL |
| from Brain.src.rising_plugin.risingplugin import handle_chat_completion |
| from Brain.src.logs import logger |
| from Brain.src.model.chat_response_model import ChatResponseModel |
| from Brain.src.model.message_model import MessageModel |
|
|
|
|
class ChatService:
    """Service that builds a chat context and talks to the OpenAI API."""

    def __init__(self, ai_name=AGENT_NAME, llm_model=DEFAULT_GPT_MODEL):
        # ai_name: display name of the agent (defaults to the project-wide AGENT_NAME).
        # llm_model: OpenAI model identifier used for chat completions.
        self.ai_name = ai_name
        self.llm_model = llm_model

    def generate_context(self, prompt, relevant_memory, full_message_history, model):
        """Create the initial (currently empty) context for a chat call.

        Args:
            prompt (str): The system prompt explaining the rules to the AI
                (not yet placed into the context — TODO confirm intent).
            relevant_memory: Memory snippets relevant to the conversation
                (currently unused).
            full_message_history (list): All messages exchanged so far.
            model (str): The model name (currently unused).

        Returns:
            tuple: (next_message_to_add_index, insertion_index, current_context)
            where next_message_to_add_index is the index of the newest history
            message, insertion_index is the position at which history messages
            are inserted, and current_context is the starting context list.
        """
        current_context = []
        next_message_to_add_index = len(full_message_history) - 1
        insertion_index = len(current_context)
        return (
            next_message_to_add_index,
            insertion_index,
            current_context,
        )

    def chat_with_ai(
        self,
        prompt,
        user_input,
        full_message_history,
        permanent_memory,
    ) -> ChatResponseModel:
        """Interact with the OpenAI API, sending the prompt, user input,
        message history, and permanent memory.

        Retries automatically (after a 10 second back-off) when the API
        reports a rate limit; any other error propagates to the caller.

        Args:
            prompt (str): The prompt explaining the rules to the AI.
            user_input (str): The input from the user.
            full_message_history (list): The list of all messages sent between
                the user and the AI.
            permanent_memory (Obj): The memory object containing the permanent
                memory (currently unused).

        Returns:
            ChatResponseModel: The AI's response wrapped in the response model.
        """
        while True:
            try:
                model = self.llm_model
                logger.debug(f"Chat with AI on model : {model}")

                # Relevant-memory retrieval is not implemented yet.
                relevant_memory = ""

                (
                    next_message_to_add_index,
                    insertion_index,
                    current_context,
                ) = self.generate_context(
                    prompt, relevant_memory, full_message_history, model
                )

                # Walk the history from newest to oldest, inserting each
                # message at the fixed insertion point so the final context
                # ends up in chronological order.
                while next_message_to_add_index >= 0:
                    message_to_add = full_message_history[next_message_to_add_index]
                    current_context.insert(insertion_index, message_to_add.to_json())
                    next_message_to_add_index -= 1

                # Append the current user input as the final message.
                current_context.extend(
                    [MessageModel.create_chat_message("user", user_input)]
                )

                logger.debug("------------ CONTEXT SENT TO AI ---------------")
                for message in current_context:
                    # Skip echoing the system prompt back into the debug log.
                    if message["role"] == "system" and message["content"] == prompt:
                        continue
                    logger.debug(
                        f"{message['role'].capitalize()}: {message['content']}"
                    )
                    logger.debug("")
                logger.debug("----------- END OF CONTEXT ----------------")

                return ChatResponseModel(
                    handle_chat_completion(model=model, messages=current_context)
                )
            except RateLimitError:
                # The retry loop exists for exactly this case: back off,
                # then loop around and rebuild/resend the request.
                logger.warning("Error: API Rate Limit Reached. Waiting 10 seconds...")
                time.sleep(10)
|
|