thomas committed on
Commit
20555bc
·
1 Parent(s): 5a899d0

feature: added plugin as sub package and updated all the response format.

Browse files
requirements.txt CHANGED
@@ -1,6 +1,5 @@
1
- aiohttp==3.8.4
2
- aiosignal==1.3.1
3
- astroid==2.15.3
4
  async-timeout==4.0.2
5
  attrs==22.2.0
6
  black==23.3.0
@@ -64,38 +63,8 @@ pinecone-client==2.2.1
64
  platformdirs==3.2.0
65
  pluggy==1.0.0
66
  proto-plus==1.22.2
67
- protobuf==4.22.3
68
- pyasn1==0.4.8
69
- pyasn1-modules==0.2.8
70
- pycparser==2.21
71
- PyJWT==2.6.0
72
- pylint==2.17.2
73
- pyparsing==3.0.9
74
- pystache==0.6.0
75
- pytest==7.3.0
76
- python-dateutil==2.8.2
77
- python-dotenv==1.0.0
78
- PyYAML==6.0
79
- regex==2023.3.23
80
- replicate==0.8.1
81
- requests==2.28.2
82
- rsa==4.9
83
- six==1.16.0
84
- slack==0.0.2
85
- SQLAlchemy==1.4.47
86
- tenacity==8.2.2
87
- tiktoken==0.3.3
88
- tomli==2.0.1
89
- tomlkit==0.11.7
90
- tqdm==4.65.0
91
- typing-inspect==0.8.0
92
- typing_extensions==4.5.0
93
- uritemplate==4.1.1
94
- urllib3==1.26.15
95
- virtualenv==20.21.0
96
- Werkzeug==2.2.3
97
  win32-setctime==1.1.0
98
  wrapt==1.15.0
99
  yarl==1.8.2
100
  twilio==8.2.1
101
- rising-plugin==0.1.8
 
1
+ langchain >= 0.0.148
2
+ replicate==0.8.1
 
3
  async-timeout==4.0.2
4
  attrs==22.2.0
5
  black==23.3.0
 
63
  platformdirs==3.2.0
64
  pluggy==1.0.0
65
  proto-plus==1.22.2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  win32-setctime==1.1.0
67
  wrapt==1.15.0
68
  yarl==1.8.2
69
  twilio==8.2.1
70
+ nemoguardrails==0.2.0
sauce_tests/01 Send Notification/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /sendNotification
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /sendNotification
sauce_tests/02 Upload Image/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /uploadImage
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /uploadImage
sauce_tests/03 Image Relatedness/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /image_relatedness
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /image_relatedness
sauce_tests/04 Add Feedback/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /feedback
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /feedback
sauce_tests/05 Get Feedback/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /feedback/test/1
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /feedback/test/1
sauce_tests/06 Get Commands/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /commands
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /commands
sauce_tests/07 Rising Chat/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /chat_rising
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /chat_rising
sauce_tests/08 Train Contacts/input.yaml CHANGED
@@ -5,7 +5,7 @@
5
  value: https://
6
  - id: variable
7
  name: domain
8
- value: smartphone.herokuapp.com
9
  - id: variable
10
  name: endpoint
11
  value: /train/contacts
 
5
  value: https://
6
  - id: variable
7
  name: domain
8
+ value: ttt246-brain.hf.space
9
  - id: variable
10
  name: endpoint
11
  value: /train/contacts
src/common/utils.py CHANGED
@@ -30,7 +30,7 @@ AGENT_NAME = "RisingBrain Assistant"
30
 
31
  # indexes of relatedness of embedding
32
  COMMAND_SMS_INDEXS = [4, 5]
33
-
34
 
35
  # Twilio
36
  ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
@@ -46,3 +46,21 @@ def get_firebase_cred():
46
  else:
47
  cred = json.loads(FIREBASE_ENV)
48
  return credentials.Certificate(cred)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  # indexes of relatedness of embedding
32
  COMMAND_SMS_INDEXS = [4, 5]
33
+ COMMAND_BROWSER_OPEN = [10]
34
 
35
  # Twilio
36
  ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
 
46
  else:
47
  cred = json.loads(FIREBASE_ENV)
48
  return credentials.Certificate(cred)
49
+
50
+
51
class ProgramType:
    """String tags identifying which client-side program handles a reply.

    These values appear in the ``"program"`` field of response payloads
    built elsewhere in the codebase (e.g. the router and plugin actions).
    """

    BROWSER = "browser"
    ALERT = "alert"
    IMAGE = "image"
    SMS = "sms"
    CONTACT = "contact"
    MESSAGE = "message"
58
+
59
+
60
# validate json format
def validateJSON(jsonData):
    """Return True when *jsonData* is a well-formed JSON string, else False."""
    try:
        json.loads(jsonData)
        return True
    except ValueError:
        return False
src/rising_plugin/__init__.py ADDED
File without changes
src/rising_plugin/csv_embed.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from langchain.document_loaders.csv_loader import CSVLoader
4
+ from langchain.embeddings.openai import OpenAIEmbeddings
5
+ import json
6
+
7
+ from ..common.utils import OPENAI_API_KEY
8
+
9
+
10
def csv_embed():
    """Embed every row of the phone prompt CSV and persist the vectors.

    Reads guardrails-config/actions/phone.csv, embeds each row's content via
    get_embed(), and writes the resulting list of vectors to phone.json in
    the same directory (row order matches the CSV).
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    csv_loader = CSVLoader(
        file_path=f"{base_dir}/guardrails-config/actions/phone.csv", encoding="utf8"
    )
    documents = csv_loader.load()

    vectors = [get_embed(doc.page_content) for doc in documents]
    with open(f"{base_dir}/guardrails-config/actions/phone.json", "w") as outfile:
        json.dump(vectors, outfile, indent=2)
23
+
24
+
25
+ """getting embed"""
26
+
27
+
28
def get_embed(data: str) -> list:
    """Return the OpenAI embedding vector for *data*.

    Fix: the original annotation was ``-> str``, but
    ``OpenAIEmbeddings.embed_query`` returns a list of floats, which is also
    what csv_embed() serializes to phone.json.

    :param data: text to embed.
    :return: embedding vector (list of floats).
    """
    embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
    return embeddings.embed_query(data)
31
+
32
+
33
+ if __name__ == "__main__":
34
+ csv_embed()
src/rising_plugin/guardrails-config/__init__.py ADDED
File without changes
src/rising_plugin/guardrails-config/actions/__init__.py ADDED
File without changes
src/rising_plugin/guardrails-config/actions/actions.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ import json
18
+ import numpy as np
19
+
20
+ from langchain.chat_models import ChatOpenAI
21
+ from langchain.embeddings.openai import OpenAIEmbeddings
22
+ from langchain.vectorstores import utils
23
+ from langchain.document_loaders.csv_loader import CSVLoader
24
+ from langchain.chains.question_answering import load_qa_chain
25
+ from langchain.docstore.document import Document
26
+
27
+ from src.common.utils import (
28
+ OPENAI_API_KEY,
29
+ COMMAND_SMS_INDEXS,
30
+ COMMAND_BROWSER_OPEN,
31
+ )
32
+ from src.rising_plugin.image_embedding import (
33
+ query_image_text,
34
+ )
35
+
36
+ from nemoguardrails.actions import action
37
+
38
+
39
@action()
async def general_question(query, model, uuid, image_search):
    """Guardrails action: answer a general user query against the phone KB.

    Selects the single most relevant prompt-template row (query embedding +
    maximal marginal relevance over the precomputed phone.json vectors), runs
    a "stuff" QA chain over that row, and normalizes the model's answer into
    the string form of a {"program": ..., "content": ...} dict.

    :param query: raw user utterance (last user message).
    :param model: OpenAI chat model name, e.g. "gpt-3.5-turbo".
    :param uuid: user id used to scope image lookups in Pinecone.
    :param image_search: when True, resolve an image answer to a stored
        image name via query_image_text().
    :return: str(dict) with "program" and "content" keys.
    """
    llm = ChatOpenAI(model_name=model, temperature=0, openai_api_key=OPENAI_API_KEY)
    chain = load_qa_chain(llm, chain_type="stuff")
    file_path = os.path.dirname(os.path.abspath(__file__))

    # phone.json holds one embedding per row of phone.csv (see csv_embed.py);
    # row order must match the CSV for the index lookup below to be valid.
    with open(f"{file_path}/phone.json", "r") as infile:
        data = json.load(infile)
    embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)

    query_result = embeddings.embed_query(query)
    # k=1: pick only the single best-matching prompt row.
    doc_list = utils.maximal_marginal_relevance(np.array(query_result), data, k=1)
    loader = CSVLoader(file_path=f"{file_path}/phone.csv", encoding="utf8")
    csv_text = loader.load()

    docs = []

    for res in doc_list:
        docs.append(
            Document(
                page_content=csv_text[res].page_content, metadata=csv_text[res].metadata
            )
        )

    chain_data = chain.run(input_documents=docs, question=query)
    try:
        # Happy path: the model replied in the requested JSON format.
        result = json.loads(chain_data)
        # check image query with only its text
        if result["program"] == "image":
            if image_search:
                result["content"] = json.dumps(
                    {"image_name": query_image_text(result["content"], "", uuid)}
                )
        # else:
        #     return result
        return str(result)
    except ValueError as e:
        # Reply was plain text: classify it by which prompt row matched.
        # Check sms and browser query
        if doc_list[0] in COMMAND_SMS_INDEXS:
            return str({"program": "sms", "content": chain_data})
        elif doc_list[0] in COMMAND_BROWSER_OPEN:
            return str({"program": "browser", "content": "https://google.com"})
        return str({"program": "message", "content": chain_data})
src/rising_plugin/guardrails-config/actions/phone.csv ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prompt template
2
+ "If user said that open a website using web browsers, please answer belowing json format. The url user is going to open can exist or not. If user doesn't say exact url and want to open some sites, you have to find the best proper url. If user didn't say any url and you can't find proper url, please set website url to ""https://www.google.com"". {""program"": ""browser"", ""url"": ""website url that user is going to open""}"
3
+ "If user said that send notification or alert, please answer belowing json format. If user didn't say what to send, please set content to ""This is risingphone"". {""program"": ""alert"", ""content"": ""text that user is going to send""}"
4
+ "If user is going to say about a image with its description to search, please answer belowing json format. {""program"": ""image"", ""content"": ""description of the image that user is going to search""}"
5
+ "If user is going to ask about a image, please answer belowing json format. {""program"": ""image"", ""content"": ""description of the image that user is going to search""}"
6
+ "If user said that send sms or text, please answer belowing json format. {""program"": ""sms"", ""content"": ""ask who""}"
7
+ "If user said that compose, write, or create an sms message, please answer belowing json format. {""program"": ""sms"", ""content"": ""ask who""}"
8
+ "If user said that search contact with its description such as display name or phone number, please answer belowing json format. {""program"": ""contact"", ""content"": ""description of the contact that user is going to search""}"
9
+ "If user said that open a new tab, go to a new tab, or open a new page, please answer belowing json format. {""program"": ""open_tab"", ""content"": """"}"
10
+ "If user said that open a new tab and search, go to a new tab and search, or open a new page and search, please answer belowing json format. {""program"": ""open_tab_search"", ""content"": ""keyword that user is going to search""}"
11
+ "If user said that close a tab, please answer belowing json format. {""program"": ""close_tab"", ""content"": """"}"
12
+ "If user said that launch a browser or open a browser, please answer belowing json format. {""program"": ""browser"", ""content"": ""https://google.com""}"
13
+ "If user said that go to a previous page, or open a previous page, please answer belowing json format. {""program"": ""previous_page"", ""content"": """"}"
14
+ "If user said that go to a next page, or open a next page, please answer belowing json format. {""program"": ""next_page"", ""content"": """"}"
15
+ "If user said that scroll up, scroll up page, or page scroll up, please answer belowing json format. {""program"": ""scroll_up"", ""content"": """"}"
16
+ "If user said that scroll down, scroll down page, page scroll down, please answer belowing json format. {""program"": ""scroll_down"", ""content"": """"}"
17
+ "If user said that scroll top, scroll top page, or scroll top of page, please answer belowing json format. {""program"": ""scroll_top"", ""content"": """"}"
18
+ "If user said that scroll bottom, scroll bottom page, or scroll bottom of page, please answer belowing json format. {""program"": ""scroll_bottom"", ""content"": """"}"
19
+ "If all of above is not correct, please give the most appropriate answer to the user's question. Please answer belowing json format. {""program"":""message"", ""content"":""your answer""}"
src/rising_plugin/guardrails-config/actions/phone.json ADDED
The diff for this file is too large to render. See raw diff
 
src/rising_plugin/guardrails-config/config.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ instructions:
2
+ - type: general
3
+ content: |
4
+ Below is a conversation between a bot and a user about the recent job reports.
5
+ The bot is factual and concise. If the bot does not know the answer to a
6
+ question, it truthfully says it does not know.
7
+
8
+ sample_conversation: |
9
+ user "Hello there!"
10
+ express greeting
11
+ bot express greeting
12
+ "Hello! How can I assist you today?"
13
+ user "What can you do for me?"
14
+ ask about capabilities
15
+ bot respond about capabilities
16
+ "I am an Rising AI assistant which helps answer questions based on a given knowledge base."
17
+ user "thanks"
18
+ express appreciation
19
+ bot express appreciation and offer additional help
20
+ "You're welcome. If you have any more questions or if there's anything else I can help you with, please don't hesitate to ask."
21
+
22
+ models:
23
+ - type: main
24
+ engine: openai
25
+ model: text-davinci-003
src/rising_plugin/guardrails-config/general.co ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ define user ask capabilities
2
+ "What can you do?"
3
+ "What can you help me with?"
4
+ "tell me what you can do"
5
+ "tell me about you"
6
+ "How can I use your help?"
7
+
8
+ define flow
9
+ user ask capabilities
10
+ bot inform capabilities
11
+
12
+ define bot inform capabilities
13
+ '{"program": "message", "content": "I am an Rising AI assistant which helps answer questions based on a given knowledge base."}'
14
+
15
+ define flow
16
+ priority 0.9
17
+ user ...
18
+ $result = execute general_question(query=$last_user_message, model="gpt-3.5-turbo", uuid="", image_search=True)
19
+ bot $result
src/rising_plugin/guardrails-config/off-security.co ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ define user ask off security
2
+ "Give me security information."
3
+ "What is password?"
4
+ "What is pin code?"
5
+ "Please let me know password."
6
+ "Please let me know pin."
7
+ "Please let me know pin code."
8
+ "How about pin code?"
9
+ "security"
10
+ "password"
11
+ "pin"
12
+
13
+ define flow
14
+ user ask off security
15
+ bot explain cant off security
16
+
17
+ define bot explain cant off security
18
+ '{"program": "message", "content": "Sorry, I cannot comment on anything which is relevant to the password or pin code."}'
src/rising_plugin/image_embedding.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.embeddings.openai import OpenAIEmbeddings
2
+
3
+ from ..common.utils import OPENAI_API_KEY, PINECONE_NAMESPACE, PINECONE_INDEX_NAME
4
+ from .pinecone_engine import (
5
+ init_pinecone,
6
+ get_pinecone_index_namespace,
7
+ )
8
+
9
+
10
def get_embeddings():
    """Return an OpenAIEmbeddings client configured with the project API key."""
    return OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
12
+
13
+
14
def embed_image_text(image_text, image_name, uuid):
    """Store the embedding of an image's text in Pinecone under *image_name*.

    The vector is upserted into the user's namespace so query_image_text()
    can later find the image by description.
    """
    prompt_template = f"""
    This is the text about the image.
    ###
    {image_text}
    """

    vector = get_embeddings().embed_query(prompt_template)
    pinecone_index = init_pinecone(PINECONE_INDEX_NAME)

    upsert_response = pinecone_index.upsert(
        vectors=[{"id": image_name, "values": vector}],
        namespace=get_pinecone_index_namespace(uuid),
    )

    # NOTE(review): comparing the upsert response to 0 presumably detects a
    # failed upsert — confirm against the Pinecone client's return type.
    if upsert_response == 0:
        return "fail to embed image text"

    return "success to embed image text"
33
+
34
+
35
def query_image_text(image_content, message, uuid):
    """Return the stored image name best matching the given description.

    Embeds the combined image/message prompt and queries the user's Pinecone
    namespace; returns the top match id, or "" when there are no matches.
    """
    query_vector = get_embeddings().embed_query(
        get_prompt_image_with_message(image_content, message)
    )
    pinecone_index = init_pinecone(PINECONE_INDEX_NAME)
    response = pinecone_index.query(
        vector=query_vector,
        top_k=3,
        include_values=False,
        namespace=get_pinecone_index_namespace(uuid),
    )
    matches = response["matches"]
    if matches:
        return matches[0]["id"]
    return ""
49
+
50
+
51
def get_prompt_image_with_message(image_content, message):
    """Build the retrieval prompt combining an image's text and a user message."""
    return f"""
    This is the text about the image.
    ###
    {image_content}
    ###
    This message is the detailed description of the image.
    ###
    {message}
    """
src/rising_plugin/pinecone_engine.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # initialize pinecone
2
+ import pinecone
3
+ from typing import Any
4
+ from ..common.utils import (
5
+ PINECONE_KEY,
6
+ PINECONE_ENV,
7
+ PINECONE_INDEX_NAME,
8
+ PINECONE_NAMESPACE,
9
+ )
10
+
11
+ DIMENSION = 1536
12
+ METRIC = "cosine"
13
+ POD_TYPE = "p1.x1"
14
+
15
+
16
# get the existing index in pinecone or create a new one
def init_pinecone(index_name, flag=True):
    """Initialize the Pinecone client and return an index handle.

    :param index_name: name of the Pinecone index.
    :param flag: True -> open the existing index; False -> create a new
        index using the module's DIMENSION/METRIC/POD_TYPE settings.
    """
    pinecone.init(api_key=PINECONE_KEY, environment=PINECONE_ENV)
    if not flag:
        # create a new index in pinecone
        return pinecone.create_index(
            index_name, dimension=DIMENSION, metric=METRIC, pod_type=POD_TYPE
        )
    return pinecone.Index(index_name)
26
+
27
+
28
+ """add item in pinecone"""
29
+
30
+
31
def add_pinecone(namespace: str, key: str, value: str) -> Any:
    """Upsert a single vector into the default index under *namespace*.

    :param key: vector id.
    :param value: vector values.
    :return: the raw upsert response from Pinecone.
    """
    index = init_pinecone(PINECONE_INDEX_NAME)
    return index.upsert(
        vectors=[{"id": key, "values": value}],
        namespace=namespace,
    )
39
+
40
+
41
+ """update item in pinecone"""
42
+
43
+
44
def update_pinecone(namespace: str, key: str, value: str) -> Any:
    """Update the vector stored under *key* in the given namespace.

    :return: the raw update response from Pinecone.
    """
    index = init_pinecone(PINECONE_INDEX_NAME)
    return index.update(
        id=key,
        values=value,
        namespace=namespace,
    )
53
+
54
+
55
+ """delete item in pinecone"""
56
+
57
+
58
def delete_pinecone(namespace: str, key: str) -> Any:
    """Delete a single vector by id from the default index's namespace."""
    index = init_pinecone(PINECONE_INDEX_NAME)
    return index.delete(ids=[key], namespace=namespace)
62
+
63
+
64
+ """delete all item in the namespace"""
65
+
66
+
67
def delete_all_pinecone(namespace: str) -> Any:
    """Remove every vector stored under *namespace* in the default index."""
    index = init_pinecone(PINECONE_INDEX_NAME)
    return index.delete(delete_all=True, namespace=namespace)
71
+
72
+
73
+ """generate index name of pinecone"""
74
+
75
+
76
def get_pinecone_index_name(uuid):
    """Generate the per-user Pinecone index name ("<index>-<uuid>")."""
    return "-".join((PINECONE_INDEX_NAME, uuid))
78
+
79
+
80
+ """generate a namespace of pinecone"""
81
+
82
+
83
def get_pinecone_index_namespace(uuid):
    """Generate the per-user Pinecone namespace ("<namespace>-<uuid>")."""
    return "-".join((PINECONE_NAMESPACE, uuid))
src/rising_plugin/risingplugin.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datetime
4
+ import openai
5
+ import replicate
6
+ import textwrap
7
+
8
+ from typing import Any
9
+
10
+ from nemoguardrails.rails import LLMRails, RailsConfig
11
+
12
+ from langchain.chat_models import ChatOpenAI
13
+
14
+ from firebase_admin import storage
15
+
16
+ from ..common.utils import (
17
+ OPENAI_API_KEY,
18
+ FIREBASE_STORAGE_ROOT,
19
+ )
20
+ from .image_embedding import (
21
+ query_image_text,
22
+ get_prompt_image_with_message,
23
+ )
24
+
25
+ # Give the path to the folder containing the rails
26
+ file_path = os.path.dirname(os.path.abspath(__file__))
27
+ config = RailsConfig.from_path(f"{file_path}/guardrails-config")
28
+
29
+ # set max_chunk_size = 1800 because of adding some string
30
+ max_chunk_size = 1800 # recommended max_chunk_size = 2048
31
+
32
+
33
def getChunks(query: str):
    """Split *query* into chunks of at most ``max_chunk_size`` characters.

    break_long_words/replace_whitespace are disabled so words and the
    original whitespace inside the prompt are preserved across chunks.
    """
    return textwrap.wrap(
        query, width=max_chunk_size, break_long_words=False, replace_whitespace=False
    )
37
+
38
+
39
def processLargeText(app: Any, chunks: Any):
    """Feed query chunks through the guardrails app and normalize the reply.

    For a single chunk the text is sent as-is and the reply's "content" is
    returned (or, if the reply is not the expected JSON, the whole payload
    re-parsed after quote normalization).  For multiple chunks the text is
    streamed using a "PART i/n" protocol: every part but the last is sent
    with a do-not-answer-yet preamble; only the final part asks the model to
    process the full request, and that final reply is returned as a
    {"program": ..., "content": ...} dict.

    Fixes vs. the original:
    - last_query was missing the "]\\n" that closes the "[START PART i/n"
      marker before the chunk text.
    - when the final multi-chunk reply parsed as JSON on the first attempt
      the function fell out of the loop and returned None; it now returns
      the same dict shape as the fallback path.

    :param app: LLMRails application used to generate replies.
    :param chunks: list of text chunks (see getChunks()).
    """
    if len(chunks) == 1:
        message = app.generate(
            messages=[
                {
                    "role": "user",
                    "content": chunks[0],
                }
            ]
        )
        response_text = ""
        try:
            response_text += json.loads(message["content"])["content"]
        except Exception as e:
            # Reply was not valid JSON (often single-quoted); normalize the
            # quotes and return the fully parsed payload instead.
            # fmt: off
            message["content"] = message["content"].replace("\'", '"')
            # fmt: on
            response_text = json.loads(message["content"])
        return response_text
    else:
        response_text: str = ""
        first_query = "The total length of the content that I want to send you is too large to send in only one piece.\nFor sending you that content, I will follow this rule:\n[START PART 1/10]\nThis is the content of the part 1 out of 10 in total\n[END PART 1/10]\nThen you just answer: 'Received part 1/10'\nAnd when I tell you 'ALL PART SENT', then you can continue processing the data and answering my requests."
        message = app.generate(messages=[{"role": "user", "content": first_query}])
        for index, chunk in enumerate(chunks):
            # Process each chunk with ChatGPT
            if index + 1 != len(chunks):
                chunk_query = (
                    "Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as 'Part "
                    + str(index + 1)
                    + "/"
                    + str(len(chunks))
                    + "received' and wait for the next part.\n"
                    + "[START PART "
                    + str(index + 1)
                    + "/"
                    + str(len(chunks))
                    + "]\n"
                    + chunk
                    + "\n[END PART "
                    + str(index + 1)
                    + "/"
                    + str(len(chunks))
                    + "]\n"
                    + "Remember not answering yet. Just acknowledge you received this part with the message 'Part 1/10 received' and wait for the next part."
                )
                message = app.generate(
                    messages=[
                        {
                            "role": "user",
                            "content": chunk_query,
                        }
                    ]
                )
            else:
                last_query = (
                    "[START PART "
                    + str(index + 1)
                    + "/"
                    + str(len(chunks))
                    + "]\n"  # FIX: was missing, leaving the START marker unclosed
                    + chunk
                    + "\n[END PART "
                    + str(index + 1)
                    + "/"
                    + str(len(chunks))
                    + "]\n"
                    + "ALL PART SENT. Now you can continue processing the request."
                )
                message = app.generate(
                    messages=[{"role": "user", "content": last_query}]
                )
                try:
                    # FIX: return the parsed reply; the original fell through
                    # the loop and implicitly returned None on this path.
                    parsed = json.loads(message["content"])
                    response_text += parsed["content"]
                    return {"program": parsed["program"], "content": response_text}
                except Exception as e:
                    # fmt: off
                    message["content"] = message["content"].replace("\'", '"')
                    # fmt: on
                    parsed = json.loads(message["content"])
                    response_text = parsed["content"]
                    return {"program": parsed["program"], "content": response_text}
        # out of for-loop (unreachable: the final iteration always returns)
119
+
120
+
121
def getCompletion(
    query,
    model="gpt-3.5-turbo",
    uuid="",
    image_search=True,
):
    """Run *query* through the guardrails-wrapped chat model.

    NOTE: uuid and image_search are accepted for interface compatibility but
    are not used directly here (they matter to the guardrails actions).
    """
    chat_llm = ChatOpenAI(model_name=model, temperature=0, openai_api_key=OPENAI_API_KEY)

    # Break input text into chunks and hand them to the rails app.
    rails_app = LLMRails(config, chat_llm)
    return processLargeText(rails_app, getChunks(query))
134
+
135
+
136
def query_image_ask(image_content, message, uuid):
    """Return True when the completion classifies the prompt as an image query.

    Any failure (bad reply shape, parse error, API error) yields False.
    """
    prompt = get_prompt_image_with_message(image_content, message)
    try:
        reply = getCompletion(query=prompt, uuid=uuid, image_search=False)
        return reply["program"] == "image"
    except Exception:
        return False
147
+
148
+
149
def getTextFromImage(filename):
    """Caption an image stored in Firebase using the Replicate BLIP model.

    A short-lived signed URL for the blob is generated and handed to the
    BLIP captioning model — the image is never downloaded locally.

    Fixes vs. the original: `blob.blob(...__add__(...))` replaced with plain
    `+`, the unused pre-initialized `download_url` local removed, and the
    user-facing error message typo ("happend", dangling colon) corrected.

    :param filename: image file name under FIREBASE_STORAGE_ROOT.
    :return: caption text, or an error message string on failure.
    """
    # Create a reference to the image file you want to caption.
    bucket = storage.bucket()
    blob = bucket.blob(FIREBASE_STORAGE_ROOT + filename)

    try:
        # Signed URL valid for 5 minutes — long enough for Replicate to fetch.
        download_url = blob.generate_signed_url(
            datetime.timedelta(seconds=300), method="GET", version="v4"
        )

        output = replicate.run(
            "salesforce/blip:2e1dddc8621f72155f24cf2e0adbde548458d3cab9f00c0139eea840d0ac4746",
            input={"image": download_url},
        )

    except Exception as e:
        output = "Error happened while analyzing your prompt. Please ask me again."

    return str(output)
170
+
171
+
172
+ """chat with ai
173
+ response:
174
+ {
175
+ 'id': 'chatcmpl-6p9XYPYSTTRi0xEviKjjilqrWU2Ve',
176
+ 'object': 'chat.completion',
177
+ 'created': 1677649420,
178
+ 'model': 'gpt-3.5-turbo',
179
+ 'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
180
+ 'choices': [
181
+ {
182
+ 'message': {
183
+ 'role': 'assistant',
184
+ 'content': 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.'},
185
+ 'finish_reason': 'stop',
186
+ 'index': 0
187
+ }
188
+ ]
189
+ }
190
+ """
191
+
192
+
193
# Define a content filter function
def filter_guardrails(model: any, query: str):
    """Return the canned guardrails reply for *query*, or "" if none applies.

    Runs the query through the rails app; when the reply is one of the two
    fixed messages defined in the guardrails .co flows it is returned so the
    caller can override the raw completion, otherwise "" is returned.
    """
    chat_llm = ChatOpenAI(model_name=model, temperature=0, openai_api_key=OPENAI_API_KEY)
    rails_app = LLMRails(config, chat_llm)

    # split query with chunks and get the guardrails reply
    reply = processLargeText(rails_app, getChunks(query))

    canned_replies = (
        "Sorry, I cannot comment on anything which is relevant to the password or pin code.",
        "I am an Rising AI assistant which helps answer questions based on a given knowledge base.",
    )
    if reply in canned_replies:
        return reply
    return ""
213
+
214
+
215
def handle_chat_completion(messages: Any, model: str = "gpt-3.5-turbo") -> Any:
    """Proxy a chat completion through OpenAI, overriding guardrail-flagged replies.

    :param messages: chat history; the last entry's content is filtered.
    :param model: OpenAI chat model name.
    :return: the OpenAI response, with its first choice's content replaced by
        the canned guardrails message when the filter matches.
    """
    openai.api_key = OPENAI_API_KEY

    response = openai.ChatCompletion.create(model=model, messages=messages)

    # Filter the latest user message using the content filter.
    filtered = filter_guardrails(model, messages[-1]["content"])
    if filtered:
        response["choices"][0]["message"]["content"] = filtered
    return response
src/router/api.py CHANGED
@@ -2,18 +2,18 @@ import json
2
  import os
3
 
4
  from flask import Blueprint, request, jsonify, send_from_directory
5
- from rising_plugin.common.utils import ProgramType
6
 
7
  from src.common.assembler import Assembler
8
- from rising_plugin.risingplugin import (
 
9
  getCompletion,
10
  getTextFromImage,
11
  query_image_ask,
12
  handle_chat_completion,
13
  )
14
  from src.firebase.cloudmessage import send_message, get_tokens
15
- from rising_plugin.csv_embed import csv_embed
16
- from rising_plugin.image_embedding import embed_image_text, query_image_text
17
 
18
  from src.logs import logger
19
  from src.model.basic_model import BasicModel
@@ -55,23 +55,19 @@ def construct_blueprint_api():
55
 
56
  # check contact querying
57
  try:
58
- result_json = eval(result)
59
- if result_json["program"] == ProgramType.CONTACT:
60
  # querying contacts to getting its expected results
61
  contacts_results = contacts_service.query_contacts(
62
- uuid=uuid, search=result_json["content"]
63
  )
64
- result_json["content"] = str(contacts_results)
65
- result = str(result_json)
66
  except Exception as e:
67
- logger.error(title="sendNotification", message=result)
68
 
69
- notification = {"title": "alert", "content": result}
70
 
71
  state, value = send_message(notification, [token])
72
- response = jsonify({"message": value, "result": result})
73
- response.status_code = 200
74
- return response
75
 
76
  """@generator.response(
77
  status_code=200, schema={"message": "message", "result": "test_result"}
@@ -98,9 +94,7 @@ def construct_blueprint_api():
98
  notification = {"title": "alert", "content": embed_result}
99
 
100
  state, value = send_message(notification, [token])
101
- response = jsonify({"message": value, "result": result})
102
- response.status_code = 200
103
- return response
104
 
105
  """@generator.response(
106
  status_code=200, schema={"message": "message", "result": "test_result"}
@@ -138,19 +132,12 @@ def construct_blueprint_api():
138
 
139
  notification = {"title": "alert", "content": json.dumps(image_response)}
140
  state, value = send_message(notification, [token])
141
- response = jsonify(
142
- {
143
- "message": value,
144
- "result": json.dumps(
145
- {
146
- "program": "image",
147
- "content": json.dumps(image_response),
148
- }
149
- ),
150
- }
151
  )
152
- response.status_code = 200
153
- return response
154
 
155
  @api.route("/file/<string:filename>")
156
  def get_swagger_file(filename):
@@ -208,7 +195,7 @@ def construct_blueprint_api():
208
  @api.route("/feedback/<string:search>/<int:rating>")
209
  def get_feedback(search, rating):
210
  result = feedback_service.get(search, rating)
211
- return assembler.to_response(200, "added successfully", json.dumps(result))
212
 
213
  """@generator.response(
214
  status_code=200, schema={"message": "message", "result": "test_result"}
@@ -218,9 +205,7 @@ def construct_blueprint_api():
218
  def get_commands():
219
  result = command_service.get()
220
  return assembler.to_response(
221
- 200,
222
- "success",
223
- json.dumps({"program": "help_command", "content": json.dumps(result)}),
224
  )
225
 
226
  """@generator.request_body(
@@ -268,14 +253,10 @@ def construct_blueprint_api():
268
  return assembler.to_response(
269
  200,
270
  "added successfully",
271
- json.dumps(
272
- {
273
- "program": "agent",
274
- "message": json.dumps(
275
- assistant_reply.get_one_message_item().to_json()
276
- ),
277
- }
278
- ),
279
  )
280
 
281
  """@generator.request_body(
 
2
  import os
3
 
4
  from flask import Blueprint, request, jsonify, send_from_directory
 
5
 
6
  from src.common.assembler import Assembler
7
+ from src.common.utils import ProgramType
8
+ from src.rising_plugin.risingplugin import (
9
  getCompletion,
10
  getTextFromImage,
11
  query_image_ask,
12
  handle_chat_completion,
13
  )
14
  from src.firebase.cloudmessage import send_message, get_tokens
15
+ from src.rising_plugin.csv_embed import csv_embed
16
+ from src.rising_plugin.image_embedding import embed_image_text, query_image_text
17
 
18
  from src.logs import logger
19
  from src.model.basic_model import BasicModel
 
55
 
56
  # check contact querying
57
  try:
58
+ if result["program"] == ProgramType.CONTACT:
 
59
  # querying contacts to getting its expected results
60
  contacts_results = contacts_service.query_contacts(
61
+ uuid=uuid, search=result["content"]
62
  )
63
+ result["content"] = str(contacts_results)
 
64
  except Exception as e:
65
+ logger.error(title="sendNotification", message=json.dumps(result))
66
 
67
+ notification = {"title": "alert", "content": json.dumps(result)}
68
 
69
  state, value = send_message(notification, [token])
70
+ return assembler.to_response(200, value, result)
 
 
71
 
72
  """@generator.response(
73
  status_code=200, schema={"message": "message", "result": "test_result"}
 
94
  notification = {"title": "alert", "content": embed_result}
95
 
96
  state, value = send_message(notification, [token])
97
+ return assembler.to_response(200, value, result)
 
 
98
 
99
  """@generator.response(
100
  status_code=200, schema={"message": "message", "result": "test_result"}
 
132
 
133
  notification = {"title": "alert", "content": json.dumps(image_response)}
134
  state, value = send_message(notification, [token])
135
+
136
+ return assembler.to_response(
137
+ code=200,
138
+ message=value,
139
+ result={"program": "image", "content": image_response},
 
 
 
 
 
140
  )
 
 
141
 
142
  @api.route("/file/<string:filename>")
143
  def get_swagger_file(filename):
 
195
  @api.route("/feedback/<string:search>/<int:rating>")
196
  def get_feedback(search, rating):
197
  result = feedback_service.get(search, rating)
198
+ return assembler.to_response(200, "added successfully", result)
199
 
200
  """@generator.response(
201
  status_code=200, schema={"message": "message", "result": "test_result"}
 
205
  def get_commands():
206
  result = command_service.get()
207
  return assembler.to_response(
208
+ 200, "success", {"program": "help_command", "content": result}
 
 
209
  )
210
 
211
  """@generator.request_body(
 
253
  return assembler.to_response(
254
  200,
255
  "added successfully",
256
+ {
257
+ "program": "agent",
258
+ "message": assistant_reply.get_one_message_item().to_json(),
259
+ },
 
 
 
 
260
  )
261
 
262
  """@generator.request_body(
src/service/contact_service.py CHANGED
@@ -1,8 +1,8 @@
1
  """service to manage contacts"""
2
  from typing import List, Any
3
 
4
- from rising_plugin.csv_embed import get_embed
5
- from rising_plugin.pinecone_engine import (
6
  get_pinecone_index_namespace,
7
  update_pinecone,
8
  init_pinecone,
 
1
  """service to manage contacts"""
2
  from typing import List, Any
3
 
4
+ from src.rising_plugin.csv_embed import get_embed
5
+ from src.rising_plugin.pinecone_engine import (
6
  get_pinecone_index_namespace,
7
  update_pinecone,
8
  init_pinecone,
src/service/llm/chat_service.py CHANGED
@@ -4,7 +4,7 @@ import time
4
  from openai.error import RateLimitError
5
 
6
  from src.common.utils import AGENT_NAME, GPT_MODEL
7
- from rising_plugin.risingplugin import handle_chat_completion
8
  from src.logs import logger
9
  from src.model.chat_response_model import ChatResponseModel
10
  from src.model.message_model import MessageModel
 
4
  from openai.error import RateLimitError
5
 
6
  from src.common.utils import AGENT_NAME, GPT_MODEL
7
+ from src.rising_plugin.risingplugin import handle_chat_completion
8
  from src.logs import logger
9
  from src.model.chat_response_model import ChatResponseModel
10
  from src.model.message_model import MessageModel
tests/functional/test_recipes.py CHANGED
@@ -1,9 +1,9 @@
1
  import re
2
 
3
- from rising_plugin.risingplugin import getCompletion, getTextFromImage
4
  from src.firebase.cloudmessage import send_message
5
  from src.firebase.cloudmessage import get_tokens
6
- from rising_plugin.image_embedding import query_image_text
7
 
8
  TEST_IAMGE_NAME = "0ddffe51-3763-48d9-ab74-2086de529217"
9
  TEST_UUID = "TEST_UUID"
 
1
  import re
2
 
3
+ from src.rising_plugin.risingplugin import getCompletion, getTextFromImage
4
  from src.firebase.cloudmessage import send_message
5
  from src.firebase.cloudmessage import get_tokens
6
+ from src.rising_plugin.image_embedding import query_image_text
7
 
8
  TEST_IAMGE_NAME = "0ddffe51-3763-48d9-ab74-2086de529217"
9
  TEST_UUID = "TEST_UUID"