Kotta committed on
Commit ·
49be943
1
Parent(s): 16dced3
bugfix(#136): fixed AutoGPT interface infinite-loop issue; the finish command is now applied as well.
Browse files
Brain/src/firebase/firebase.py
CHANGED
|
@@ -3,6 +3,7 @@ import os
|
|
| 3 |
from typing import Any
|
| 4 |
|
| 5 |
import firebase_admin
|
|
|
|
| 6 |
from firebase_admin import credentials
|
| 7 |
|
| 8 |
from Brain.src.common.assembler import Assembler
|
|
@@ -62,3 +63,15 @@ def get_firebase_cred(setting: ReqModel):
|
|
| 62 |
else:
|
| 63 |
cred = json.loads(setting.firebase_key)
|
| 64 |
return credentials.Certificate(cred)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
from typing import Any
|
| 4 |
|
| 5 |
import firebase_admin
|
| 6 |
+
from firebase_admin import db
|
| 7 |
from firebase_admin import credentials
|
| 8 |
|
| 9 |
from Brain.src.common.assembler import Assembler
|
|
|
|
| 63 |
else:
|
| 64 |
cred = json.loads(setting.firebase_key)
|
| 65 |
return credentials.Certificate(cred)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
"""
|
| 69 |
+
delete data from real time database of firebase using reference link
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def delete_data_from_realtime(
|
| 74 |
+
reference_link: str, firebase_app: firebase_admin.App
|
| 75 |
+
) -> None:
|
| 76 |
+
ref = db.reference(reference_link, app=firebase_app)
|
| 77 |
+
ref.delete()
|
Brain/src/rising_plugin/llm/autogpt_llm.py
CHANGED
|
@@ -27,6 +27,11 @@ from marshmallow import ValidationError
|
|
| 27 |
|
| 28 |
import faiss
|
| 29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
class AutoGPTLLM:
|
| 32 |
"""autogpt run method to get the expected result"""
|
|
@@ -52,6 +57,12 @@ class AutoGPTLLM:
|
|
| 52 |
# Discontinue if continuous limit is reached
|
| 53 |
loop_count += 1
|
| 54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
# Send message to AI, get response
|
| 56 |
assistant_reply = agent.chain.run(
|
| 57 |
goals=goals,
|
|
|
|
| 27 |
|
| 28 |
import faiss
|
| 29 |
|
| 30 |
+
from Brain.src.rising_plugin.llm.llms import (
|
| 31 |
+
MAX_AUTO_THINKING,
|
| 32 |
+
get_finish_command_for_auto_task,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
|
| 36 |
class AutoGPTLLM:
|
| 37 |
"""autogpt run method to get the expected result"""
|
|
|
|
| 57 |
# Discontinue if continuous limit is reached
|
| 58 |
loop_count += 1
|
| 59 |
|
| 60 |
+
# validation thinking counter
|
| 61 |
+
if loop_count == MAX_AUTO_THINKING:
|
| 62 |
+
# add finish command of the command
|
| 63 |
+
ref.push().set(get_finish_command_for_auto_task())
|
| 64 |
+
break
|
| 65 |
+
|
| 66 |
# Send message to AI, get response
|
| 67 |
assistant_reply = agent.chain.run(
|
| 68 |
goals=goals,
|
Brain/src/rising_plugin/llm/llms.py
CHANGED
|
@@ -20,6 +20,8 @@ GPT_LLM_MODELS = [GPT_3_5_TURBO, GPT_4, GPT_4_32K]
|
|
| 20 |
"""exception message"""
|
| 21 |
EXCEPTION_MSG = f"The model is not correct. It should be in {LLM_MODELS}"
|
| 22 |
|
|
|
|
|
|
|
| 23 |
|
| 24 |
"""prompt"""
|
| 25 |
CATEGORY_PROMPT = """
|
|
@@ -99,3 +101,17 @@ def get_llm(
|
|
| 99 |
elif model == FALCON_7B:
|
| 100 |
llm = FalconLLM(temperature=temperature, max_new_tokens=max_new_tokens)
|
| 101 |
return llm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
"""exception message"""
|
| 21 |
EXCEPTION_MSG = f"The model is not correct. It should be in {LLM_MODELS}"
|
| 22 |
|
| 23 |
+
"""maximum auto achievement counter"""
|
| 24 |
+
MAX_AUTO_THINKING = 10
|
| 25 |
|
| 26 |
"""prompt"""
|
| 27 |
CATEGORY_PROMPT = """
|
|
|
|
| 101 |
elif model == FALCON_7B:
|
| 102 |
llm = FalconLLM(temperature=temperature, max_new_tokens=max_new_tokens)
|
| 103 |
return llm
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
"""
|
| 107 |
+
generate finish command and response for auto achievement
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def get_finish_command_for_auto_task() -> Any:
|
| 112 |
+
return {
|
| 113 |
+
"command": {
|
| 114 |
+
"args": {"response": "I have finished all my objectives."},
|
| 115 |
+
"name": "finish",
|
| 116 |
+
}
|
| 117 |
+
}
|
Brain/src/service/auto_task_service.py
CHANGED
|
@@ -1,27 +1,17 @@
|
|
| 1 |
"""auto task management to get the expected output"""
|
| 2 |
import firebase_admin
|
| 3 |
-
from firebase_admin import db
|
| 4 |
|
| 5 |
from Brain.src.model.req_model import ReqModel
|
| 6 |
from Brain.src.rising_plugin.llm.autogpt_llm import AutoGPTLLM
|
| 7 |
import time
|
| 8 |
-
import asyncio
|
| 9 |
import threading
|
| 10 |
|
| 11 |
-
"""delete data from real time database of firebase using reference link
|
| 12 |
-
"""
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
def delete_db_data(reference_link: str, firebase_app: firebase_admin.App):
|
| 16 |
-
ref = db.reference(reference_link, app=firebase_app)
|
| 17 |
-
ref.delete()
|
| 18 |
-
|
| 19 |
|
| 20 |
class AutoTaskService:
|
| 21 |
-
"""self task
|
| 22 |
response -> reference_link :str"""
|
| 23 |
|
| 24 |
-
def
|
| 25 |
self, query: str, firebase_app: firebase_admin.App, setting: ReqModel
|
| 26 |
) -> str:
|
| 27 |
# init autogpt llm
|
|
@@ -39,9 +29,11 @@ class AutoTaskService:
|
|
| 39 |
|
| 40 |
return reference_link
|
| 41 |
|
| 42 |
-
"""
|
|
|
|
| 43 |
response type:
|
| 44 |
-
/auto/{llm_name}_{uuid}_{timestamp}
|
|
|
|
| 45 |
|
| 46 |
def generate_reference_link(self, llm_name: str, uuid: str) -> str:
|
| 47 |
milliseconds = int(time.time() * 1000)
|
|
|
|
| 1 |
"""auto task management to get the expected output"""
|
| 2 |
import firebase_admin
|
|
|
|
| 3 |
|
| 4 |
from Brain.src.model.req_model import ReqModel
|
| 5 |
from Brain.src.rising_plugin.llm.autogpt_llm import AutoGPTLLM
|
| 6 |
import time
|
|
|
|
| 7 |
import threading
|
| 8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
class AutoTaskService:
|
| 11 |
+
"""self task achievement with autogpt based on langchain
|
| 12 |
response -> reference_link :str"""
|
| 13 |
|
| 14 |
+
def ask_task_with_llm(
|
| 15 |
self, query: str, firebase_app: firebase_admin.App, setting: ReqModel
|
| 16 |
) -> str:
|
| 17 |
# init autogpt llm
|
|
|
|
| 29 |
|
| 30 |
return reference_link
|
| 31 |
|
| 32 |
+
"""
|
| 33 |
+
generate reference link for autoTask
|
| 34 |
response type:
|
| 35 |
+
/auto/{llm_name}_{uuid}_{timestamp}
|
| 36 |
+
"""
|
| 37 |
|
| 38 |
def generate_reference_link(self, llm_name: str, uuid: str) -> str:
|
| 39 |
milliseconds = int(time.time() * 1000)
|