Kotta committed on
Commit
51b69ae
·
1 Parent(s): bc41366

feature(#23): updated prompts to categorize the input, and routed the matching category to AutoGPT to accomplish the task.

Browse files
Brain/src/common/assembler.py CHANGED
@@ -57,7 +57,7 @@ class Assembler:
57
 
58
  """mapping result type into json
59
  {
60
- "program": sms | contacts | browser | select_item_detail_info,
61
  "content": string
62
  }
63
  """
 
57
 
58
  """mapping result type into json
59
  {
60
+ "program": sms | contacts | browser | selectitemdetailinfo,
61
  "content": string
62
  }
63
  """
Brain/src/common/program_type.py CHANGED
@@ -10,15 +10,15 @@ class ProgramType:
10
  MESSAGE = "message"
11
 
12
  class BrowserType:
13
- OPEN_TAB = "open_tab"
14
- OPEN_TAB_SEARCH = "open_tab_search"
15
- CLOSE_TAB = "close_tab"
16
- PREVIOUS_PAGE = "previous_page"
17
- NEXT_PAGE = "next_page"
18
- SCROLL_UP = "scroll_up"
19
- SCROLL_DOWN = "scroll_down"
20
- SCROLL_TOP = "scroll_top"
21
- SCROLL_BOTTOM = "scroll_bottom"
22
- SELECT_ITEM_DETAIL_INFO = "select_item_detail_info"
23
- SELECT_ITEM = "select_item"
24
  MESSAGE = "message"
 
10
  MESSAGE = "message"
11
 
12
  class BrowserType:
13
+ OPEN_TAB = "opentab"
14
+ OPEN_TAB_SEARCH = "opentabsearch"
15
+ CLOSE_TAB = "closetab"
16
+ PREVIOUS_PAGE = "previouspage"
17
+ NEXT_PAGE = "nextpage"
18
+ SCROLL_UP = "scrollup"
19
+ SCROLL_DOWN = "scrolldown"
20
+ SCROLL_TOP = "scrolltop"
21
+ SCROLL_BOTTOM = "scrollbottom"
22
+ SELECT_ITEM_DETAIL_INFO = "selectitemdetailinfo"
23
+ SELECT_ITEM = "selectitem"
24
  MESSAGE = "message"
Brain/src/common/utils.py CHANGED
@@ -40,6 +40,10 @@ AUTH_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
40
  HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
41
 
42
 
 
 
 
 
43
  class ProgramType:
44
  BROWSER = "browser"
45
  ALERT = "alert"
@@ -47,6 +51,7 @@ class ProgramType:
47
  SMS = "sms"
48
  CONTACT = "contact"
49
  MESSAGE = "message"
 
50
 
51
 
52
  # validate json format
 
40
  HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
41
 
42
 
43
+ # action flag: True -> categorize directly with CATEGORY_PROMPT
44
+ ACTION_FLAG = True
45
+
46
+
47
  class ProgramType:
48
  BROWSER = "browser"
49
  ALERT = "alert"
 
51
  SMS = "sms"
52
  CONTACT = "contact"
53
  MESSAGE = "message"
54
+ AUTO_TASK = "autotask"
55
 
56
 
57
  # validate json format
Brain/src/rising_plugin/guardrails-config/actions/actions.py CHANGED
@@ -17,6 +17,7 @@ import os
17
  import json
18
  import numpy as np
19
 
 
20
  from Brain.src.service.train_service import TrainService
21
  from langchain.docstore.document import Document
22
 
@@ -26,6 +27,7 @@ from Brain.src.common.utils import (
26
  COMMAND_BROWSER_OPEN,
27
  PINECONE_INDEX_NAME,
28
  DEFAULT_GPT_MODEL,
 
29
  )
30
  from Brain.src.model.req_model import ReqModel
31
  from Brain.src.model.requests.request_model import BasicReq
@@ -45,6 +47,7 @@ from Brain.src.rising_plugin.llm.llms import (
45
  GPT_4,
46
  FALCON_7B,
47
  GPT_LLM_MODELS,
 
48
  )
49
 
50
  from Brain.src.rising_plugin.pinecone_engine import (
@@ -80,6 +83,8 @@ query is json string with below format
80
  async def general_question(query):
81
  """init falcon model"""
82
  falcon_llm = FalconLLM()
 
 
83
  docs = []
84
 
85
  """step 0-->: parsing parms from the json query"""
@@ -89,16 +94,23 @@ async def general_question(query):
89
  raise BrainException(BrainException.JSON_PARSING_ISSUE_MSG)
90
  query = json_query["query"]
91
  image_search = json_query["image_search"]
92
- page_content = json_query["page_content"]
93
- document_id = json_query["document_id"]
94
  setting = ReqModel(json_query["setting"])
95
  is_browser = json_query["is_browser"]
96
-
97
- docs.append(Document(page_content=page_content, metadata=""))
98
- """ 1. calling gpt model to categorize for all message"""
99
- chain_data = get_llm_chain(model=DEFAULT_GPT_MODEL, setting=setting).run(
100
- input_documents=docs, question=query
101
- )
 
 
 
 
 
 
 
 
102
  try:
103
  result = json.loads(chain_data)
104
  # check image query with only its text
@@ -110,10 +122,7 @@ async def general_question(query):
110
  """ 2. check program is message to handle it with falcon llm """
111
  if result["program"] == "message":
112
  if is_browser:
113
- result["program"] = "ask_website"
114
- else:
115
- # """FALCON_7B:"""
116
- result["content"] = falcon_llm.query(question=query)
117
  return json.dumps(result)
118
  except ValueError as e:
119
  # Check sms and browser query
@@ -123,7 +132,5 @@ async def general_question(query):
123
  return json.dumps({"program": "browser", "content": "https://google.com"})
124
 
125
  if is_browser:
126
- return json.dumps({"program": "ask_website", "content": ""})
127
- return json.dumps(
128
- {"program": "message", "content": falcon_llm.query(question=query)}
129
- )
 
17
  import json
18
  import numpy as np
19
 
20
+ from Brain.src.service.auto_task_service import AutoTaskService
21
  from Brain.src.service.train_service import TrainService
22
  from langchain.docstore.document import Document
23
 
 
27
  COMMAND_BROWSER_OPEN,
28
  PINECONE_INDEX_NAME,
29
  DEFAULT_GPT_MODEL,
30
+ ACTION_FLAG,
31
  )
32
  from Brain.src.model.req_model import ReqModel
33
  from Brain.src.model.requests.request_model import BasicReq
 
47
  GPT_4,
48
  FALCON_7B,
49
  GPT_LLM_MODELS,
50
+ CATEGORY_PROMPT,
51
  )
52
 
53
  from Brain.src.rising_plugin.pinecone_engine import (
 
83
  async def general_question(query):
84
  """init falcon model"""
85
  falcon_llm = FalconLLM()
86
+ autotask_service = AutoTaskService()
87
+ document_id = ""
88
  docs = []
89
 
90
  """step 0-->: parsing parms from the json query"""
 
94
  raise BrainException(BrainException.JSON_PARSING_ISSUE_MSG)
95
  query = json_query["query"]
96
  image_search = json_query["image_search"]
97
+
 
98
  setting = ReqModel(json_query["setting"])
99
  is_browser = json_query["is_browser"]
100
+ if ACTION_FLAG:
101
+ docs.append(Document(page_content=CATEGORY_PROMPT, metadata=""))
102
+ # temperature should be 0.
103
+ chain_data = get_llm_chain(
104
+ model=DEFAULT_GPT_MODEL, setting=setting, temperature=0.0
105
+ ).run(input_documents=docs, question=query)
106
+ else:
107
+ document_id = json_query["document_id"]
108
+ page_content = json_query["page_content"]
109
+ docs.append(Document(page_content=page_content, metadata=""))
110
+ """ 1. calling gpt model to categorize for all message"""
111
+ chain_data = get_llm_chain(model=DEFAULT_GPT_MODEL, setting=setting).run(
112
+ input_documents=docs, question=query
113
+ )
114
  try:
115
  result = json.loads(chain_data)
116
  # check image query with only its text
 
122
  """ 2. check program is message to handle it with falcon llm """
123
  if result["program"] == "message":
124
  if is_browser:
125
+ result["program"] = "askwebsite"
 
 
 
126
  return json.dumps(result)
127
  except ValueError as e:
128
  # Check sms and browser query
 
132
  return json.dumps({"program": "browser", "content": "https://google.com"})
133
 
134
  if is_browser:
135
+ return json.dumps({"program": "askwebsite", "content": ""})
136
+ return json.dumps({"program": "message", "content": chain_data})
 
 
Brain/src/rising_plugin/llm/llms.py CHANGED
@@ -20,6 +20,38 @@ GPT_LLM_MODELS = [GPT_3_5_TURBO, GPT_4, GPT_4_32K]
20
  """exception message"""
21
  EXCEPTION_MSG = f"The model is not correct. It should be in {LLM_MODELS}"
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  """validate model"""
24
 
25
 
@@ -61,7 +93,9 @@ def get_llm(
61
  """check model"""
62
  llm = GptLLM(openai_key=setting.openai_key)
63
  if model == GPT_3_5_TURBO or model == GPT_4 or model == GPT_4_32K:
64
- llm = GptLLM(model=model, openai_key=setting.openai_key)
 
 
65
  elif model == FALCON_7B:
66
  llm = FalconLLM(temperature=temperature, max_new_tokens=max_new_tokens)
67
  return llm
 
20
  """exception message"""
21
  EXCEPTION_MSG = f"The model is not correct. It should be in {LLM_MODELS}"
22
 
23
+
24
+ """prompt"""
25
+ CATEGORY_PROMPT = """
26
+ If user is going to say about a image with its description to search, please answer belowing json format. {"program": "image", "content": "description of the image that user is going to search"}
27
+ If user is going to ask about a image, please answer belowing json format. {"program": "image", "content": "description of the image that user is going to search"}
28
+ If user said that send sms or text, please answer belowing json format. {"program": "sms", "content": "ask who"}
29
+ If user said that compose, write, or create an sms message, please answer belowing json format. {"program": "sms", "content": "ask who"}
30
+ If user said that search contact with its description such as display name or phone number, please answer belowing json format. {"program": "contact", "content": "description of the contact that user is going to search"}
31
+ If user said that open a tab, go to a tab, or open a page, please answer belowing json format. {"program": "opentab", "content": ""}
32
+ If user said that open a tab and search, go to a tab and search, or open a page and search, please answer belowing json format. {"program": "opentabsearch", "content": "keyword that user is going to search"}
33
+ If user said that close a tab, please answer belowing json format. {"program": "closetab", "content": ""}
34
+ If user said that launch a browser or open a browser, please answer belowing json format. {"program": "browser", "content": "https://google.com"}
35
+ If user said that go to a previous page, or open a previous page, please answer belowing json format. {"program": "previouspage", "content": ""}
36
+ If user said that go to a next page, or open a next page, please answer belowing json format. {"program": "nextpage", "content": ""}
37
+ If user said that scroll up, scroll up page, or page scroll up, please answer belowing json format. {"program": "scrollup", "content": ""}
38
+ If user said that scroll down, scroll down page, page scroll down, please answer belowing json format. {"program": "scrolldown", "content": ""}
39
+ If user said that scroll top, scroll top page, or scroll top of page, please answer belowing json format. {"program": "scrolltop", "content": ""}
40
+ If user said that scroll bottom, scroll bottom page, or scroll bottom of page, please answer belowing json format. {"program": "scrollbottom", "content": ""}
41
+ If user is going to select an item, an article or a website with its description in a web browser, please answer belowing json format. {"program": "selectitemdetailinfo", "content": "the description of an item, an article or a website in a browser"}
42
+ If user said that ask about the content in website, for example, if users ask something like 'what is #query in this website?' or 'Can you tell me about #query based on this website?', please answer belowing json format. {"program": "askwebsite", "content": ""}
43
+ If user said that open a website using web browsers, please answer belowing json format. The url user is going to open can exist or not. If user doesn\\'t say exact url and want to open some sites, you have to find the best proper url. If user didn\\'t say any url and you can't find proper url, please set website url to "https://www.google.com". {"program": "browser", "url": "website url that user is going to open"}
44
+ If users are going to ask something based on the data of website, please answer belowing json format. {"program": "askwebsite", "content": ""}
45
+ If user is going to set or create alarm with time and label, please answer belowing json format.\n {"program": "alarm", "content": {"type":"create", "time":"please set time as 24-hours format that user is going to set. If user did not provide any alarm time, set "0:0"", "label":"please set label that user is going to set. If user did not provide any label, set "alarm""}}\n This is example data.\n User: Set an alarm.\n AI: {"program":"alarm", "content": {"type":"create", "time":"0:0", "label":"alarm"}}\n User: Set an alarm with label as "wake up".\n AI: {"program":"alarm", "content": {"type":"create", "time":"0:0", "label":"wake up"}}\n User: Set an alarm for 5:00 AM.\n AI: {"program":"alarm", "content": {"type":"create", "time":"5:00", "label":"alarm"}}\n User: Set an alarm for 5:00 PM with label as "wake up".\n AI: {"program":"alarm", "content": {"type":"create", "time":"17:00", "label":"wake up"}}
46
+ If user is going to read email or message, please answer belowing json format. {"program": "reademail", "content": ""}
47
+ If user is going to send email or message, please answer belowing json format. {"program": "sendemail", "content": ""}
48
+ If user is going to compose, write, or create an email, please answer belowing json format. {"program": "writeemail", "content": ""}
49
+ User wants to help organization in achieving its goals, which bridges the gap between the user is (present) and where he/she wants to go (future), or that is deciding in advance what to do, how to do when to do it and by whom it is to be done. Also the description ensures in thoughts and actions, work is carried on smoothly without any confusion and misunderstanding. \n And it can be done for the future and the future is full of uncertainties. \n So it looks like to make a decision as well. It helps make rational decisions by choosing the best most profitable alternative which may bring lower cost, adaptable to the organization and situations. \n If user is going to say about planning strategy, or something like that , please answer belowing json format. {"program": "autotask", "content": ""}
50
+ If all of above is not correct, please give the most appropriate answer to the user's question. Please answer belowing json format. {"program":"message", "content":"your answer"}
51
+ If users are going to know about real-time capabilities such as News, Weather, Stocks, Booking, Planning, or etc, then please answer belowing json format. {"program": "autotask", "content": ""}
52
+ If your answer is not correct with the program type which mentioned in this rules, please answer belowing json format. {"program": "autotask", "content": ""}
53
+ """
54
+
55
  """validate model"""
56
 
57
 
 
93
  """check model"""
94
  llm = GptLLM(openai_key=setting.openai_key)
95
  if model == GPT_3_5_TURBO or model == GPT_4 or model == GPT_4_32K:
96
+ llm = GptLLM(
97
+ model=model, openai_key=setting.openai_key, temperature=temperature
98
+ )
99
  elif model == FALCON_7B:
100
  llm = FalconLLM(temperature=temperature, max_new_tokens=max_new_tokens)
101
  return llm
Brain/src/rising_plugin/risingplugin.py CHANGED
@@ -28,6 +28,7 @@ from ..common.utils import (
28
  DEFAULT_GPT_MODEL,
29
  parseJsonFromCompletion,
30
  PINECONE_INDEX_NAME,
 
31
  )
32
  from .image_embedding import (
33
  query_image_text,
@@ -59,30 +60,46 @@ def llm_rails(
59
  image_search: bool = True,
60
  is_browser: bool = False,
61
  ) -> Any:
62
- """step 0: convert string to json"""
63
- index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting)
64
- train_service = TrainService(firebase_app=firebase_app, setting=setting)
 
65
 
66
- """step 1: handle with gpt-4"""
67
 
68
- query_result = get_embed(data=query, setting=setting)
69
- try:
70
- relatedness_data = index.query(
71
- vector=query_result,
72
- top_k=1,
73
- include_values=False,
74
- namespace=train_service.get_pinecone_index_train_namespace(),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  )
76
- except Exception as ex:
77
- raise BrainException(code=508, message=responses[508])
78
- if len(relatedness_data["matches"]) == 0:
79
- return str({"program": "message", "content": ""})
80
- document_id = relatedness_data["matches"][0]["id"]
81
-
82
- document = train_service.read_one_document(document_id)
83
- page_content = document["page_content"]
84
-
85
- message = rails_app.generate(
86
  messages=[
87
  {
88
  "role": "user",
@@ -90,14 +107,11 @@ def llm_rails(
90
  setting=setting,
91
  query=query,
92
  image_search=image_search,
93
- page_content=page_content,
94
- document_id=document_id,
95
  is_browser=is_browser,
96
  ),
97
  }
98
  ]
99
  )
100
- return message
101
 
102
 
103
  def processLargeText(
@@ -309,9 +323,9 @@ def rails_input_with_args(
309
  setting: ReqModel,
310
  query: str,
311
  image_search: bool,
312
- page_content: str,
313
- document_id: str,
314
  is_browser: bool,
 
 
315
  ) -> str:
316
  # convert json with params for rails.
317
  json_query_with_params = {
 
28
  DEFAULT_GPT_MODEL,
29
  parseJsonFromCompletion,
30
  PINECONE_INDEX_NAME,
31
+ ACTION_FLAG,
32
  )
33
  from .image_embedding import (
34
  query_image_text,
 
60
  image_search: bool = True,
61
  is_browser: bool = False,
62
  ) -> Any:
63
+ if not ACTION_FLAG:
64
+ """step 0: convert string to json"""
65
+ index = init_pinecone(index_name=PINECONE_INDEX_NAME, setting=setting)
66
+ train_service = TrainService(firebase_app=firebase_app, setting=setting)
67
 
68
+ """step 1: handle with gpt-4"""
69
 
70
+ query_result = get_embed(data=query, setting=setting)
71
+ try:
72
+ relatedness_data = index.query(
73
+ vector=query_result,
74
+ top_k=1,
75
+ include_values=False,
76
+ namespace=train_service.get_pinecone_index_train_namespace(),
77
+ )
78
+ except Exception as ex:
79
+ raise BrainException(code=508, message=responses[508])
80
+ if len(relatedness_data["matches"]) == 0:
81
+ return str({"program": "message", "content": ""})
82
+ document_id = relatedness_data["matches"][0]["id"]
83
+
84
+ document = train_service.read_one_document(document_id)
85
+ page_content = document["page_content"]
86
+
87
+ return rails_app.generate(
88
+ messages=[
89
+ {
90
+ "role": "user",
91
+ "content": rails_input_with_args(
92
+ setting=setting,
93
+ query=query,
94
+ image_search=image_search,
95
+ page_content=page_content,
96
+ document_id=document_id,
97
+ is_browser=is_browser,
98
+ ),
99
+ }
100
+ ]
101
  )
102
+ return rails_app.generate(
 
 
 
 
 
 
 
 
 
103
  messages=[
104
  {
105
  "role": "user",
 
107
  setting=setting,
108
  query=query,
109
  image_search=image_search,
 
 
110
  is_browser=is_browser,
111
  ),
112
  }
113
  ]
114
  )
 
115
 
116
 
117
  def processLargeText(
 
323
  setting: ReqModel,
324
  query: str,
325
  image_search: bool,
 
 
326
  is_browser: bool,
327
+ page_content: str = "",
328
+ document_id: str = "",
329
  ) -> str:
330
  # convert json with params for rails.
331
  json_query_with_params = {
Brain/src/router/api.py CHANGED
@@ -30,6 +30,7 @@ from Brain.src.rising_plugin.image_embedding import embed_image_text, query_imag
30
  from Brain.src.logs import logger
31
  from Brain.src.model.basic_model import BasicModel
32
  from Brain.src.model.feedback_model import FeedbackModel
 
33
  from Brain.src.service.command_service import CommandService
34
  from Brain.src.service.contact_service import ContactsService
35
  from Brain.src.service.feedback_service import FeedbackService
@@ -88,6 +89,13 @@ def construct_blueprint_api() -> APIRouter:
88
  # check contact querying
89
  try:
90
  contacts_service = ContactsService(setting=setting)
 
 
 
 
 
 
 
91
  if result["program"] == ProgramType.CONTACT:
92
  # querying contacts to getting its expected results
93
  contacts_results = contacts_service.query_contacts(
 
30
  from Brain.src.logs import logger
31
  from Brain.src.model.basic_model import BasicModel
32
  from Brain.src.model.feedback_model import FeedbackModel
33
+ from Brain.src.service.auto_task_service import AutoTaskService
34
  from Brain.src.service.command_service import CommandService
35
  from Brain.src.service.contact_service import ContactsService
36
  from Brain.src.service.feedback_service import FeedbackService
 
89
  # check contact querying
90
  try:
91
  contacts_service = ContactsService(setting=setting)
92
+ if result["program"] == ProgramType.AUTO_TASK:
93
+ auto_task_service = AutoTaskService()
94
+ result["content"] = auto_task_service.ask_task_with_autogpt(
95
+ query=query, firebase_app=firebase_app, setting=setting
96
+ )
97
+ return assembler.to_response(200, "", result)
98
+
99
  if result["program"] == ProgramType.CONTACT:
100
  # querying contacts to getting its expected results
101
  contacts_results = contacts_service.query_contacts(