Kotta committed on
Commit ·
74f7ac4
1
Parent(s): 867ec7f
feature(#23): implement realtime pipeline with Firebase Realtime Database for AutoGPT results.
Browse files
Brain/src/common/utils.py
CHANGED
|
@@ -17,6 +17,7 @@ API_URL = "http://localhost:5000/file/swagger.json"
|
|
| 17 |
# firebase
|
| 18 |
FIREBASE_STORAGE_ROOT = "images/"
|
| 19 |
FIREBASE_STORAGE_BUCKET = "test3-83ffc.appspot.com"
|
|
|
|
| 20 |
|
| 21 |
# pinecone
|
| 22 |
PINECONE_NAMESPACE = "risinglangchain-namespace"
|
|
|
|
| 17 |
# firebase
|
| 18 |
FIREBASE_STORAGE_ROOT = "images/"
|
| 19 |
FIREBASE_STORAGE_BUCKET = "test3-83ffc.appspot.com"
|
| 20 |
+
FIREBASE_REALTIME_DATABASE = "https://test3-83ffc-default-rtdb.firebaseio.com/"
|
| 21 |
|
| 22 |
# pinecone
|
| 23 |
PINECONE_NAMESPACE = "risinglangchain-namespace"
|
Brain/src/firebase/firebase.py
CHANGED
|
@@ -8,7 +8,7 @@ from firebase_admin import credentials
|
|
| 8 |
from Brain.src.common.assembler import Assembler
|
| 9 |
from Brain.src.common.brain_exception import BrainException
|
| 10 |
from Brain.src.common.http_response_codes import responses
|
| 11 |
-
from Brain.src.common.utils import FIREBASE_STORAGE_BUCKET
|
| 12 |
from Brain.src.logs import logger
|
| 13 |
from Brain.src.model.req_model import ReqModel
|
| 14 |
from Brain.src.model.requests.request_model import BasicReq
|
|
@@ -30,7 +30,10 @@ def initialize_app(setting: ReqModel) -> firebase_admin.App:
|
|
| 30 |
)
|
| 31 |
return firebase_admin.initialize_app(
|
| 32 |
get_firebase_cred(setting),
|
| 33 |
-
{
|
|
|
|
|
|
|
|
|
|
| 34 |
name=app_name,
|
| 35 |
)
|
| 36 |
|
|
|
|
| 8 |
from Brain.src.common.assembler import Assembler
|
| 9 |
from Brain.src.common.brain_exception import BrainException
|
| 10 |
from Brain.src.common.http_response_codes import responses
|
| 11 |
+
from Brain.src.common.utils import FIREBASE_STORAGE_BUCKET, FIREBASE_REALTIME_DATABASE
|
| 12 |
from Brain.src.logs import logger
|
| 13 |
from Brain.src.model.req_model import ReqModel
|
| 14 |
from Brain.src.model.requests.request_model import BasicReq
|
|
|
|
| 30 |
)
|
| 31 |
return firebase_admin.initialize_app(
|
| 32 |
get_firebase_cred(setting),
|
| 33 |
+
{
|
| 34 |
+
"storageBucket": FIREBASE_STORAGE_BUCKET,
|
| 35 |
+
"databaseURL": FIREBASE_REALTIME_DATABASE,
|
| 36 |
+
},
|
| 37 |
name=app_name,
|
| 38 |
)
|
| 39 |
|
Brain/src/rising_plugin/llm/autogpt_llm.py
CHANGED
|
@@ -1,5 +1,8 @@
|
|
| 1 |
"""autogpt plugin with langchain"""
|
|
|
|
| 2 |
|
|
|
|
|
|
|
| 3 |
from langchain.experimental import AutoGPT
|
| 4 |
from langchain.chat_models import ChatOpenAI
|
| 5 |
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
|
|
@@ -28,7 +31,17 @@ import faiss
|
|
| 28 |
class AutoGPTLLM:
|
| 29 |
"""autogpt run method to get the expected result"""
|
| 30 |
|
| 31 |
-
def run(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
user_input = (
|
| 33 |
"Determine which next command to use, "
|
| 34 |
"and respond using the format specified above:"
|
|
@@ -46,7 +59,10 @@ class AutoGPTLLM:
|
|
| 46 |
memory=agent.memory,
|
| 47 |
user_input=user_input,
|
| 48 |
)
|
|
|
|
|
|
|
| 49 |
|
|
|
|
| 50 |
# Print Assistant thoughts
|
| 51 |
print(assistant_reply)
|
| 52 |
agent.full_message_history.append(HumanMessage(content=user_input))
|
|
@@ -91,12 +107,16 @@ class AutoGPTLLM:
|
|
| 91 |
|
| 92 |
agent.memory.add_documents([Document(page_content=memory_to_add)])
|
| 93 |
agent.full_message_history.append(SystemMessage(content=result))
|
|
|
|
|
|
|
| 94 |
|
| 95 |
"""function to manage auto-task achievement
|
| 96 |
ex: query = write a weather report for SF today
|
| 97 |
"""
|
| 98 |
|
| 99 |
-
def ask_task(
|
|
|
|
|
|
|
| 100 |
search = SerpAPIWrapper()
|
| 101 |
tools = [
|
| 102 |
Tool(
|
|
@@ -127,4 +147,9 @@ class AutoGPTLLM:
|
|
| 127 |
)
|
| 128 |
# Set verbose to be true
|
| 129 |
agent.chain.verbose = True
|
| 130 |
-
self.run(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
"""autogpt plugin with langchain"""
|
| 2 |
+
import json
|
| 3 |
|
| 4 |
+
import firebase_admin
|
| 5 |
+
from firebase_admin import db
|
| 6 |
from langchain.experimental import AutoGPT
|
| 7 |
from langchain.chat_models import ChatOpenAI
|
| 8 |
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
|
|
|
|
| 31 |
class AutoGPTLLM:
|
| 32 |
"""autogpt run method to get the expected result"""
|
| 33 |
|
| 34 |
+
def run(
|
| 35 |
+
self,
|
| 36 |
+
agent: AutoGPT,
|
| 37 |
+
goals: List[str],
|
| 38 |
+
firebase_app: firebase_admin.App,
|
| 39 |
+
reference_link: str,
|
| 40 |
+
) -> str:
|
| 41 |
+
"""firebase realtime database init"""
|
| 42 |
+
ref = db.reference(reference_link, app=firebase_app)
|
| 43 |
+
|
| 44 |
+
"""autogpt engine"""
|
| 45 |
user_input = (
|
| 46 |
"Determine which next command to use, "
|
| 47 |
"and respond using the format specified above:"
|
|
|
|
| 59 |
memory=agent.memory,
|
| 60 |
user_input=user_input,
|
| 61 |
)
|
| 62 |
+
# update the result with the assistant_reply in firebase realtime database
|
| 63 |
+
ref.push().set(json.loads(assistant_reply))
|
| 64 |
|
| 65 |
+
# update chat history in autogpt agent
|
| 66 |
# Print Assistant thoughts
|
| 67 |
print(assistant_reply)
|
| 68 |
agent.full_message_history.append(HumanMessage(content=user_input))
|
|
|
|
| 107 |
|
| 108 |
agent.memory.add_documents([Document(page_content=memory_to_add)])
|
| 109 |
agent.full_message_history.append(SystemMessage(content=result))
|
| 110 |
+
# add result of the command
|
| 111 |
+
ref.push().set({"result": result})
|
| 112 |
|
| 113 |
"""function to manage auto-task achievement
|
| 114 |
ex: query = write a weather report for SF today
|
| 115 |
"""
|
| 116 |
|
| 117 |
+
def ask_task(
|
| 118 |
+
self, query: str, firebase_app: firebase_admin.App, reference_link: str
|
| 119 |
+
):
|
| 120 |
search = SerpAPIWrapper()
|
| 121 |
tools = [
|
| 122 |
Tool(
|
|
|
|
| 147 |
)
|
| 148 |
# Set verbose to be true
|
| 149 |
agent.chain.verbose = True
|
| 150 |
+
self.run(
|
| 151 |
+
agent=agent,
|
| 152 |
+
goals=[query],
|
| 153 |
+
firebase_app=firebase_app,
|
| 154 |
+
reference_link=reference_link,
|
| 155 |
+
)
|
Brain/src/router/api.py
CHANGED
|
@@ -69,8 +69,6 @@ def construct_blueprint_api() -> APIRouter:
|
|
| 69 |
# cloud message
|
| 70 |
cloud_message = CloudMessage(firebase_app=firebase_app)
|
| 71 |
|
| 72 |
-
# test
|
| 73 |
-
|
| 74 |
# parsing params
|
| 75 |
query = data.message
|
| 76 |
token = setting.token
|
|
|
|
| 69 |
# cloud message
|
| 70 |
cloud_message = CloudMessage(firebase_app=firebase_app)
|
| 71 |
|
|
|
|
|
|
|
| 72 |
# parsing params
|
| 73 |
query = data.message
|
| 74 |
token = setting.token
|
Brain/src/service/auto_task_service.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""auto task management to get the expected output"""
|
| 2 |
+
import firebase_admin
|
| 3 |
+
|
| 4 |
+
from Brain.src.model.req_model import ReqModel
|
| 5 |
+
from Brain.src.rising_plugin.llm.autogpt_llm import AutoGPTLLM
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class AutoTaskService:
    """Self task achievement with AutoGPT based on LangChain.

    Results are delivered asynchronously through Firebase Realtime Database:
    callers receive a reference link (database path) and read the pushed
    results there.
    """

    def ask_task_with_autogpt(
        self, query: str, firebase_app: firebase_admin.App, setting: ReqModel
    ) -> str:
        """Run an AutoGPT task for *query* and return the realtime-db reference link.

        Args:
            query: natural-language task, e.g. "write a weather report for SF today".
            firebase_app: initialized firebase-admin app backing the realtime database.
            setting: request settings; only ``setting.uuid`` is read here.

        Returns:
            The reference link (realtime database path) that AutoGPT pushes
            intermediate and final results to.
        """
        # init autogpt llm
        autogpt_llm = AutoGPTLLM()

        # generate reference link before starting so it can be handed back
        # to the caller even though ask_task runs the whole task first
        reference_link = self.generate_reference_link(
            llm_name="autogpt", uuid=setting.uuid
        )
        # call autogpt (pushes results to the realtime database as it runs)
        autogpt_llm.ask_task(
            query=query, firebase_app=firebase_app, reference_link=reference_link
        )

        return reference_link

    def generate_reference_link(self, llm_name: str, uuid: str) -> str:
        """Generate a realtime-database reference link for an autoTask.

        Format: ``/auto/{llm_name}_{uuid}_{timestamp}`` where the timestamp
        is the current epoch time in milliseconds, making links unique per
        request for the same user/llm pair.
        """
        milliseconds = int(time.time() * 1000)
        return f"/auto/{llm_name}_{uuid}_{milliseconds}"
|
requirements.txt
CHANGED
|
@@ -69,4 +69,7 @@ yarl==1.8.2
|
|
| 69 |
twilio==8.2.1
|
| 70 |
nemoguardrails==0.2.0
|
| 71 |
user-agents==2.2.0
|
|
|
|
|
|
|
|
|
|
| 72 |
tiktoken==0.4.0
|
|
|
|
| 69 |
twilio==8.2.1
|
| 70 |
nemoguardrails==0.2.0
|
| 71 |
user-agents==2.2.0
|
| 72 |
+
faiss-cpu==1.7.4
|
| 73 |
+
google-search-results==2.4.2
|
| 74 |
+
tiktoken==0.4.0
|
| 75 |
tiktoken==0.4.0
|